/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

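/*
 * For example, with the default 48-bit guest address width:
 * __DOMAIN_MAX_PFN(48) == 2^36 - 1 == 0xfffffffff, so on a 64-bit build
 * DOMAIN_MAX_PFN(48) is 0xfffffffff and DOMAIN_MAX_ADDR(48) is
 * 0xfffffffff000; on a 32-bit build the min_t() clamps the PFN to
 * ULONG_MAX instead.
 */
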
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
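
/*
 * Concretely, ~0xFFFUL sets every bit from 12 upward, so the core sees
 * support for 4KiB, 8KiB, ..., 2MiB, 1GiB and every other power-of-two
 * multiple of 4KiB, and may hand us any mapping with that size and
 * alignment.
 */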

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

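/*
 * Worked example for the helpers above: the default 48-bit width gives
 * width_to_agaw(48) == 2, agaw_to_level(2) == 4 (a 4-level table) and
 * agaw_to_width(2) == 48. At level 2, level_to_offset_bits() is 9, so
 * pfn_level_offset(pfn, 2) is (pfn >> 9) & 0x1ff and level_size(2) is
 * 512 VT-d pages, i.e. one 2MiB superpage.
 */
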
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

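/*
 * Note that PAGE_SHIFT and VTD_PAGE_SHIFT are both 12 on x86, so the
 * mm<->dma pfn conversions above are currently identity operations; they
 * exist so the arithmetic stays correct if the MM page size ever exceeds
 * the fixed 4KiB VT-d page size.
 */
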
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
		     __context_present(context) :
		     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */

	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* number of IOMMUs in the system; sizes the g_iommus array */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support: normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
   or new capability bits are set. */
#define pasid_enabled(iommu) (ecs_enabled(iommu) &&			\
			      (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			printk(KERN_INFO
				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
			intel_iommu_tboot_noforce = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
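
/*
 * Usage, as implied by the parser above: options are comma-separated on
 * the kernel command line, e.g.
 *
 *	intel_iommu=on,strict,sp_off
 *
 * enables the IOMMU, disables batched IOTLB flushing and disables
 * superpage support in one go.
 */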

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}
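
/*
 * In other words, iommu->domains is a two-level table keyed by domain ID:
 * the top 8 bits of the DID select a lazily kzalloc()ed array of 256
 * pointers and the low 8 bits select the slot within it, which keeps the
 * footprint small when only a few of the up to 65536 domain IDs are used.
 */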

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
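
/*
 * Layout note for the lookup above: with extended context support the root
 * entry's 'lo' pointer covers devfns 0-127 and 'hi' covers 128-255, and
 * each extended context entry occupies two 128-bit slots (hence the
 * devfn *= 2); without ECS only 'lo' is used and each devfn gets a single
 * 128-bit entry.
 */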

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = to_pci_dev(dev);

#ifdef CONFIG_X86
		/* VMD child devices currently cannot be handled individually */
		if (is_vmd(pdev->bus))
			return NULL;
#endif

		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev && pdev->is_virtfn)
					goto got_pdev;

				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

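/*
 * Illustrative walk (assuming a 48-bit, 4-level domain): a lookup of pfn P
 * with *target_level == 1 starts at domain->pgd (level 4) and indexes with
 * (P >> 27) & 0x1ff, then (P >> 18) & 0x1ff, (P >> 9) & 0x1ff and finally
 * P & 0x1ff, allocating any missing intermediate tables on the way down.
 */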

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       int retain_level, struct dma_pte *pte,
			       unsigned long pfn, unsigned long start_pfn,
			       unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2) {
			dma_pte_free_level(domain, level - 1, retain_level,
					   level_pte, level_pfn, start_pfn,
					   last_pfn);
		}

		/*
		 * Free the page table if we're below the level we want to
		 * retain and the range covers the entire table.
		 */
		if (level < retain_level && !(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/*
 * clear last level (leaf) ptes and free page table pages below the
 * level we wish to keep intact.
 */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   int retain_level)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

Joerg Roedelb6904202015-08-13 11:32:18 +02001278static void dma_free_pagelist(struct page *freelist)
David Woodhouseea8ea462014-03-05 17:09:32 +00001279{
1280 struct page *pg;
1281
1282 while ((pg = freelist)) {
1283 freelist = pg->freelist;
1284 free_pgtable_page(page_address(pg));
1285 }
1286}
1287
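/*
 * A minimal sketch of the intended calling pattern (assumes the caller
 * already holds a reference to the domain):
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	... flush the IOTLB on every IOMMU the domain is attached to ...
 *	dma_free_pagelist(freelist);
 *
 * Freeing the list before the flush would let the hardware walk
 * page-table pages that have already been recycled.
 */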
static void iova_entry_free(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	dma_free_pagelist(freelist);
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
		       iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

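/*
 * Program the root-table address into the hardware and issue the
 * Set Root Table Pointer command, spinning until the status register
 * confirms that the hardware has latched the new pointer.
 */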
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be extra-safe. It looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	struct device_domain_info *info;

	assert_spin_locked(&device_domain_lock);

	if (!iommu->qi)
		return NULL;

	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			if (info->ats_supported && info->dev)
				return info;
			break;
		}

	return NULL;
}

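/*
 * Recompute domain->has_iotlb_device: true when at least one PCI device
 * attached to the domain has ATS enabled, so IOTLB flushes know whether
 * device-TLB invalidation work is needed at all.
 */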
static void domain_update_iotlb(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	bool has_iotlb_device = false;

	assert_spin_locked(&device_domain_lock);

	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;

		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (pdev->ats_enabled) {
			has_iotlb_device = true;
			break;
		}
	}

	domain->has_iotlb_device = has_iotlb_device;
}

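/*
 * Opportunistically enable PASID, PRI and ATS on a newly attached PCI
 * device, in the order the comment below explains, and update the
 * domain's IOTLB bookkeeping if ATS actually came up.
 */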
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	struct pci_dev *pdev;

	assert_spin_locked(&device_domain_lock);

	if (!info || !dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);

#ifdef CONFIG_INTEL_IOMMU_SVM
	/* The PCIe spec, in its wisdom, declares that the behaviour of
	   the device if you enable PASID support after ATS support is
	   undefined. So always enable PASID support on devices which
	   have it, even if we can't yet know if we're ever going to
	   use it. */
	if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
		info->pasid_enabled = 1;

	if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
		info->pri_enabled = 1;
#endif
	if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
		info->ats_enabled = 1;
		domain_update_iotlb(info->domain);
		info->ats_qdep = pci_ats_queue_depth(pdev);
	}
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	struct pci_dev *pdev;

	assert_spin_locked(&device_domain_lock);

	if (!dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);

	if (info->ats_enabled) {
		pci_disable_ats(pdev);
		info->ats_enabled = 0;
		domain_update_iotlb(info->domain);
	}
#ifdef CONFIG_INTEL_IOMMU_SVM
	if (info->pri_enabled) {
		pci_disable_pri(pdev);
		info->pri_enabled = 0;
	}
	if (info->pasid_enabled) {
		pci_disable_pasid(pdev);
		info->pasid_enabled = 0;
	}
#endif
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	if (!domain->has_iotlb_device)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->ats_enabled)
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = info->ats_qdep;
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

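/*
 * Page-selective IOTLB invalidation for a range of @pages starting at
 * @pfn. Falls back to a domain-selective flush when the hardware lacks
 * PSI support or the (power-of-two rounded) range exceeds what PSI can
 * express.
 */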
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
				  struct dmar_domain *domain,
				  unsigned long pfn, unsigned int pages,
				  int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
	u16 did = domain->iommu_did[iommu->seq_id];

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
				      addr, mask);
}

static void iommu_flush_iova(struct iova_domain *iovad)
{
	struct dmar_domain *domain;
	int idx;

	domain = container_of(iovad, struct dmar_domain, iovad);

	for_each_domain_iommu(idx, domain) {
		struct intel_iommu *iommu = g_iommus[idx];
		u16 did = domain->iommu_did[iommu->seq_id];

		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

		if (!cap_caching_mode(iommu->cap))
			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
					      0, MAX_AGAW_PFN_WIDTH);
	}
}

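/*
 * Clear the Enable Protected Memory bit so DMA to the protected low/high
 * memory regions is no longer blocked, then wait for the status bit to
 * confirm the change.
 */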
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	u32 ndomains, nlongs;
	size_t size;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("%s: Number of Domains supported <%d>\n",
		 iommu->name, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("%s: Allocating domain id array failed\n",
		       iommu->name);
		return -ENOMEM;
	}

	size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
	iommu->domains = kzalloc(size, GFP_KERNEL);

	if (iommu->domains) {
		size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
	}

	if (!iommu->domains || !iommu->domains[0]) {
		pr_err("%s: Allocating domain array failed\n",
		       iommu->name);
		kfree(iommu->domain_ids);
		kfree(iommu->domains);
		iommu->domain_ids = NULL;
		iommu->domains = NULL;
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain-id 0, hence we need to pre-allocate it. We also
	 * use domain-id 0 as a marker for non-allocated domain-id, so
	 * make sure it is not used for a real domain.
	 */
	set_bit(0, iommu->domain_ids);

	return 0;
}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	if (!iommu->domains || !iommu->domain_ids)
		return;

again:
	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
		struct dmar_domain *domain;

		if (info->iommu != iommu)
			continue;

		if (!info->dev || !info->domain)
			continue;

		domain = info->domain;

		__dmar_remove_one_dev_info(info);

		if (!domain_type_is_vm_or_si(domain)) {
			/*
			 * The domain_exit() function can't be called under
			 * device_domain_lock, as it takes this lock itself.
			 * So release the lock here and re-run the loop
			 * afterwards.
			 */
			spin_unlock_irqrestore(&device_domain_lock, flags);
			domain_exit(domain);
			goto again;
		}
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
		int i;

		for (i = 0; i < elems; i++)
			kfree(iommu->domains[i]);
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
	if (pasid_enabled(iommu)) {
		if (ecap_prs(iommu->ecap))
			intel_svm_finish_prq(iommu);
		intel_svm_free_pasid_tables(iommu);
	}
#endif
}

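/*
 * Allocate a bare dmar_domain with the given behaviour flags; the caller
 * is responsible for attaching it to an IOMMU and initialising the
 * page-table and iova state.
 */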
static struct dmar_domain *alloc_domain(int flags)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	domain->has_iotlb_device = false;
	INIT_LIST_HEAD(&domain->devices);

	return domain;
}

/* Must be called with device_domain_lock and iommu->lock held */
static int domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long ndomains;
	int num;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] += 1;
	domain->iommu_count += 1;
	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_zero_bit(iommu->domain_ids, ndomains);

		if (num >= ndomains) {
			pr_err("%s: No free domain ids\n", iommu->name);
			domain->iommu_refcnt[iommu->seq_id] -= 1;
			domain->iommu_count -= 1;
			return -ENOSPC;
		}

		set_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, domain);

		domain->iommu_did[iommu->seq_id] = num;
		domain->nid = iommu->node;

		domain_update_iommu_cap(domain);
	}

	return 0;
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num, count = INT_MAX;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] -= 1;
	count = --domain->iommu_count;
	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
		num = domain->iommu_did[iommu->seq_id];
		clear_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, NULL);

		domain_update_iommu_cap(domain);
		domain->iommu_did[iommu->seq_id] = 0;
	}

	return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		pr_err("Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				pr_err("Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

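/*
 * Round a guest address width up to the next AGAW-compatible value:
 * VT-d page tables cover 12 bits of page offset plus 9 bits per level,
 * so the adjusted width must satisfy (agaw - 12) % 9 == 0. For example,
 * gaw = 48 is already aligned (r = 0, agaw = 48), while gaw = 40 gives
 * r = (40 - 12) % 9 = 1 and is rounded up to agaw = 48. The result is
 * capped at 64 bits.
 */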
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
		       int guest_width)
{
	int adjust_width, agaw;
	unsigned long sagaw;
	int err;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);

	err = init_iova_flush_queue(&domain->iovad,
				    iommu_flush_iova, iova_entry_free);
	if (err)
		return err;

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("Hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Remove associated devices and clear attached or cached domains */
	rcu_read_lock();
	domain_remove_dev_info(domain);
	rcu_read_unlock();

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

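/*
 * Install the context entry that maps (bus, devfn) on @iommu to
 * @domain's page tables, choosing pass-through, device-IOTLB or plain
 * multi-level translation as appropriate, and flush the old entry out
 * of the hardware caches.
 */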
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn)
{
	u16 did = domain->iommu_did[iommu->seq_id];
	int translation = CONTEXT_TT_MULTI_LEVEL;
	struct device_domain_info *info = NULL;
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int ret, agaw;

	WARN_ON(did == 0);

	if (hw_pass_through && domain_type_is_si(domain))
		translation = CONTEXT_TT_PASS_THROUGH;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -ENOMEM;
	context = iommu_context_addr(iommu, bus, devfn, 1);
	if (!context)
		goto out_unlock;

	ret = 0;
	if (context_present(context))
		goto out_unlock;

	/*
	 * For kdump cases, old valid entries may be cached due to the
	 * in-flight DMA and copied pgtable, but there is no unmapping
	 * behaviour for them, thus we need an explicit cache flush for
	 * the newly-mapped device. For kdump, at this point, the device
	 * is supposed to have finished reset at its driver probe stage,
	 * so no in-flight DMA will exist, and we don't need to worry
	 * about it hereafter.
	 */
	if (context_copied(context)) {
		u16 did_old = context_domain_id(context);

		if (did_old < cap_ndoms(iommu->cap)) {
			iommu->flush.flush_context(iommu, did_old,
						   (((u16)bus) << 8) | devfn,
						   DMA_CCMD_MASK_NOBIT,
						   DMA_CCMD_DEVICE_INVL);
			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
						 DMA_TLB_DSI_FLUSH);
		}
	}

	pgd = domain->pgd;

	context_clear_entry(context);
	context_set_domain_id(context, did);

	/*
	 * Skip top levels of page tables for iommu which has less agaw
	 * than default. Unnecessary for PT mode.
	 */
	if (translation != CONTEXT_TT_PASS_THROUGH) {
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			ret = -ENOMEM;
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd))
				goto out_unlock;
		}

		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		if (info && info->ats_supported)
			translation = CONTEXT_TT_DEV_IOTLB;
		else
			translation = CONTEXT_TT_MULTI_LEVEL;

		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	} else {
		/*
		 * In pass through mode, AW must be programmed to
		 * indicate the largest AGAW value supported by
		 * hardware. And ASR is ignored by hardware.
		 */
		context_set_address_width(context, iommu->msagaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we only need to flush the write-buffer. If
	 * it _does_ cache non-present entries, then it does so in the
	 * special domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);

	ret = 0;

out_unlock:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff);
}

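/*
 * Set up context entries for @dev and, for PCI devices, for every DMA
 * alias the device may use (e.g. requester IDs behind a PCIe-to-PCI
 * bridge).
 */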
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn);

	data.domain = domain;
	data.iommu = iommu;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

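/*
 * A worked example for hardware_largepage_caps() below (values
 * illustrative): with iov_pfn and phy_pfn both 2MiB-aligned (low nine
 * bits clear) and pages >= 512, the first loop iteration succeeds and
 * level becomes 2, i.e. a 2MiB superpage can be used; a misaligned bit
 * in either pfn stops the loop at level 1.
 */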
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

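/*
 * Core mapping loop: populate the domain's page tables for @nr_pages
 * starting at @iov_pfn, taking physical addresses either from @sg or
 * from the contiguous range starting at @phys_pfn, and using superpages
 * whenever alignment and size allow.
 */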
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				unsigned long nr_superpages, end_pfn;

				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);

				nr_superpages = sg_res / lvl_pages;
				end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage(s).
				 * We're adding new large pages, so make sure
				 * we don't remove their parent tables.
				 */
				dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
						       largepage_lvl + 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need a lock here; nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
				iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

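/*
 * Tear down the context entry for (bus, devfn): clear it, flush it from
 * the context cache and invalidate the IOTLB entries tagged with its old
 * domain id.
 */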
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	unsigned long flags;
	struct context_entry *context;
	u16 did_old;

	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (!context) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return;
	}
	did_old = context_domain_id(context);
	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	spin_unlock_irqrestore(&iommu->lock, flags);
	iommu->flush.flush_context(iommu,
				   did_old,
				   (((u16)bus) << 8) | devfn,
				   DMA_CCMD_MASK_NOBIT,
				   DMA_CCMD_DEVICE_INVL);
	iommu->flush.flush_iotlb(iommu,
				 did_old,
				 0,
				 0,
				 DMA_TLB_DSI_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link)
		__dmar_remove_one_dev_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct device->archdata.iommu to store the info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (likely(info))
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

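/*
 * Allocate and register a device_domain_info for (bus, devfn) and bind
 * it to @domain. If another thread beat us to it, the existing domain is
 * returned and the caller must free the one it passed in; on success the
 * device is also context-mapped.
 */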
static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
						    int bus, int devfn,
						    struct device *dev,
						    struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->ats_supported = info->pasid_supported = info->pri_supported = 0;
	info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
	info->ats_qdep = 0;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	if (dev && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(info->dev);

		if (ecap_dev_iotlb_support(iommu->ecap) &&
		    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
		    dmar_find_matched_atsr_unit(pdev))
			info->ats_supported = 1;

		if (ecs_enabled(iommu)) {
			if (pasid_enabled(iommu)) {
				int features = pci_pasid_features(pdev);
				if (features >= 0)
					info->pasid_supported = features | 1;
			}

			if (info->ats_supported && ecap_prs(iommu->ecap) &&
			    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
				info->pri_supported = 1;
		}
	}

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);

	if (!found) {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2) {
			found = info2->domain;
			info2->dev = dev;
		}
	}

	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	spin_lock(&iommu->lock);
	ret = domain_attach_iommu(domain, iommu);
	spin_unlock(&iommu->lock);

	if (ret) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return NULL;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (dev && domain_context_mapping(domain, dev)) {
		pr_err("Domain context map for %s failed\n", dev_name(dev));
		dmar_remove_one_dev_info(domain, dev);
		return NULL;
	}

	return domain;
}

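/* pci_for_each_dma_alias() callback: remembers the last alias seen */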
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}

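/*
 * Find the domain already used by dev's DMA alias, if any; otherwise
 * allocate and initialize a fresh domain for the device.
 */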
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
	struct device_domain_info *info = NULL;
	struct dmar_domain *domain = NULL;
	struct intel_iommu *iommu;
	u16 req_id, dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	req_id = ((u16)bus << 8) | devfn;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, use it */
		if (info)
			goto out;
	}

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	if (domain_init(domain, iommu, gaw)) {
		domain_exit(domain);
		return NULL;
	}

out:

	return domain;
}

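/*
 * Attach the domain to dev (and, for PCI, to its DMA alias first). Returns
 * the domain actually attached, which differs from the one passed in when a
 * concurrent caller won the race, or NULL on failure.
 */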
static struct dmar_domain *set_domain_for_dev(struct device *dev,
					      struct dmar_domain *domain)
{
	struct intel_iommu *iommu;
	struct dmar_domain *tmp;
	u16 req_id, dma_alias;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	req_id = ((u16)bus << 8) | devfn;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		/* register PCI DMA alias device */
		if (req_id != dma_alias) {
			tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
						       dma_alias & 0xff, NULL, domain);

			if (!tmp || tmp != domain)
				return tmp;
		}
	}

	tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
	if (!tmp || tmp != domain)
		return tmp;

	return domain;
}

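/*
 * Return the domain for dev, allocating and attaching a new one if the
 * device does not have one yet.
 */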
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;

	domain = find_domain(dev);
	if (domain)
		goto out;

	domain = find_or_alloc_domain(dev, gaw);
	if (!domain)
		goto out;

	tmp = set_domain_for_dev(dev, domain);
	if (!tmp || domain != tmp) {
		domain_exit(domain);
		domain = tmp;
	}

out:

	return domain;
}

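/*
 * Reserve the [start, end] range in the domain's IOVA allocator and set up
 * a 1:1 virtual-to-physical mapping for it.
 */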
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		pr_err("Reserving iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx\n", start, end);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int domain_prepare_identity_map(struct device *dev,
				       struct dmar_domain *domain,
				       unsigned long long start,
				       unsigned long long end)
{
	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, and so didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
			dev_name(dev), start, end);
		return 0;
	}

	pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EIO;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EIO;
	}

	return iommu_domain_identity_map(domain, start, end);
}

static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = domain_prepare_identity_map(dev, domain, start, end);
	if (ret)
		domain_exit(domain);

	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	pr_info("Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

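/*
 * Allocate the static identity (si) domain. Unless hardware passthrough is
 * in use, also build 1:1 mappings for all usable physical memory ranges.
 */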
static int __init si_domain_init(int hw)
{
	int nid, ret = 0;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("Identity mapping domain allocated\n");

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

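/* Check whether dev is currently attached to the static identity domain */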
static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

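/*
 * Attach dev to domain. Returns -ENODEV if the device has no IOMMU, and
 * -EBUSY if the device ended up in a different domain than the one asked for.
 */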
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	return 0;
}

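/* Check whether dev appears in the device scope of any RMRR */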
static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}

/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}

static int iommu_should_identity_map(struct device *dev, int startup)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}

static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev);
	if (!ret)
		pr_info("%s identity mapping for device %s\n",
			hw ? "Hardware" : "Software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev = to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}

static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from the sane iommu hardware state.
	 * If the queued invalidation is already initialized by us
	 * (for example, while enabling interrupt-remapping) then
	 * things are already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
		pr_info("%s: Using Register based invalidation\n",
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
		pr_info("%s: Using Queued invalidation\n", iommu->name);
	}
}

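/*
 * Copy the context tables of one bus from the previous kernel's root entry.
 * Copied entries get PASID support cleared and are tagged with a 'copied'
 * marker (bit 67, see below) so they can be recognized later.
 */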
static int copy_context_table(struct intel_iommu *iommu,
			      struct root_entry *old_re,
			      struct context_entry **tbl,
			      int bus, bool ext)
{
	int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
	struct context_entry *new_ce = NULL, ce;
	struct context_entry *old_ce = NULL;
	struct root_entry re;
	phys_addr_t old_ce_phys;

	tbl_idx = ext ? bus * 2 : bus;
	memcpy(&re, old_re, sizeof(re));

	for (devfn = 0; devfn < 256; devfn++) {
		/* First calculate the correct index */
		idx = (ext ? devfn * 2 : devfn) % 256;

		if (idx == 0) {
			/* First save what we may have and clean up */
			if (new_ce) {
				tbl[tbl_idx] = new_ce;
				__iommu_flush_cache(iommu, new_ce,
						    VTD_PAGE_SIZE);
				pos = 1;
			}

			if (old_ce)
				iounmap(old_ce);

			ret = 0;
			if (devfn < 0x80)
				old_ce_phys = root_entry_lctp(&re);
			else
				old_ce_phys = root_entry_uctp(&re);

			if (!old_ce_phys) {
				if (ext && devfn == 0) {
					/* No LCTP, try UCTP */
					devfn = 0x7f;
					continue;
				} else {
					goto out;
				}
			}

			ret = -ENOMEM;
			old_ce = memremap(old_ce_phys, PAGE_SIZE,
					  MEMREMAP_WB);
			if (!old_ce)
				goto out;

			new_ce = alloc_pgtable_page(iommu->node);
			if (!new_ce)
				goto out_unmap;

			ret = 0;
		}

		/* Now copy the context entry */
		memcpy(&ce, old_ce + idx, sizeof(ce));

		if (!__context_present(&ce))
			continue;

		did = context_domain_id(&ce);
		if (did >= 0 && did < cap_ndoms(iommu->cap))
			set_bit(did, iommu->domain_ids);

		/*
		 * We need a marker for copied context entries. This
		 * marker needs to work for the old format as well as
		 * for extended context entries.
		 *
		 * Bit 67 of the context entry is used. In the old
		 * format this bit is available to software, in the
		 * extended format it is the PGE bit, but PGE is ignored
		 * by HW if PASIDs are disabled (and thus still
		 * available).
		 *
		 * So disable PASIDs first and then mark the entry
		 * copied. This means that we don't copy PASID
		 * translations from the old kernel, but this is fine as
		 * faults there are not fatal.
		 */
		context_clear_pasid_enable(&ce);
		context_set_copied(&ce);

		new_ce[idx] = ce;
	}

	tbl[tbl_idx + pos] = new_ce;

	__iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);

out_unmap:
	memunmap(old_ce);

out:
	return ret;
}

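/*
 * For the kdump case: take over the translation structures the previous
 * kernel left enabled, copying its context tables into fresh pages and
 * wiring them into our root-entry table so in-flight DMA is not broken
 * while we take over.
 */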
static int copy_translation_tables(struct intel_iommu *iommu)
{
	struct context_entry **ctxt_tbls;
	struct root_entry *old_rt;
	phys_addr_t old_rt_phys;
	int ctxt_table_entries;
	unsigned long flags;
	u64 rtaddr_reg;
	int bus, ret;
	bool new_ext, ext;

	rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
	ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
	new_ext = !!ecap_ecs(iommu->ecap);

	/*
	 * The RTT bit can only be changed when translation is disabled,
	 * but disabling translation means to open a window for data
	 * corruption. So bail out and don't copy anything if we would
	 * have to change the bit.
	 */
	if (new_ext != ext)
		return -EINVAL;

	old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
	if (!old_rt_phys)
		return -EINVAL;

	old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
	if (!old_rt)
		return -ENOMEM;

	/* This is too big for the stack - allocate it from slab */
	ctxt_table_entries = ext ? 512 : 256;
	ret = -ENOMEM;
	ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
	if (!ctxt_tbls)
		goto out_unmap;

	for (bus = 0; bus < 256; bus++) {
		ret = copy_context_table(iommu, &old_rt[bus],
					 ctxt_tbls, bus, ext);
		if (ret) {
			pr_err("%s: Failed to copy context table for bus %d\n",
			       iommu->name, bus);
			continue;
		}
	}

	spin_lock_irqsave(&iommu->lock, flags);

	/* Context tables are copied, now write them to the root_entry table */
	for (bus = 0; bus < 256; bus++) {
		int idx = ext ? bus * 2 : bus;
		u64 val;

		if (ctxt_tbls[idx]) {
			val = virt_to_phys(ctxt_tbls[idx]) | 1;
			iommu->root_entry[bus].lo = val;
		}

		if (!ext || !ctxt_tbls[idx + 1])
			continue;

		val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
		iommu->root_entry[bus].hi = val;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	kfree(ctxt_tbls);

	__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);

	ret = 0;

out_unmap:
	memunmap(old_rt);

	return ret;
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	bool copied_tables = false;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read only
		 */
		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
	}

	/* Preallocate enough resources for IOMMU hot-addition */
	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
		g_num_of_iommus = DMAR_UNITS_SUPPORTED;

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		pr_err("Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		intel_iommu_init_qi(iommu);

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		init_translation_status(iommu);

		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
			iommu_disable_translation(iommu);
			clear_translation_pre_enabled(iommu);
			pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
				iommu->name);
		}

		/*
		 * TBD:
		 * We could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret)
			goto free_iommu;

		if (translation_pre_enabled(iommu)) {
			pr_info("Translation already enabled - trying to copy translation structures\n");

			ret = copy_translation_tables(iommu);
			if (ret) {
				/*
				 * We found the IOMMU with translation
				 * enabled - but failed to copy over the
				 * old root-entry table. Try to proceed
				 * by disabling translation now and
				 * allocating a clean root-entry table.
				 * This might cause DMAR faults, but
				 * probably the dump will still succeed.
				 */
				pr_err("Failed to copy translation tables from previous kernel for %s\n",
				       iommu->name);
				iommu_disable_translation(iommu);
				clear_translation_pre_enabled(iommu);
			} else {
				pr_info("Copied translation tables from previous kernel for %s\n",
					iommu->name);
				copied_tables = true;
			}
		}

		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
#ifdef CONFIG_INTEL_IOMMU_SVM
		if (pasid_enabled(iommu))
			intel_svm_alloc_pasid_tables(iommu);
#endif
	}

	/*
	 * Now that qi is enabled on all iommus, set the root entry and flush
	 * caches. This is required on some Intel X58 chipsets, otherwise the
	 * flush_context function will loop forever and the boot hangs.
	 */
	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);
		iommu_set_root_entry(iommu);
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	if (iommu_identity_mapping) {
		ret = si_domain_init(hw_pass_through);
		if (ret)
			goto free_iommu;
	}

	/*
	 * If we copied translations from a previous kernel in the kdump
	 * case, we can not assign the devices to domains now, as that
	 * would eliminate the old mappings. So skip this part and defer
	 * the assignment to device driver initialization time.
	 */
	if (copied_tables)
		goto domains_done;

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			pr_crit("Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	pr_info("Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOSes list non-existent devices in the DMAR table */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
			if (ret)
				pr_err("Mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

domains_done:

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
		if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
			ret = intel_svm_enable_prq(iommu);
			if (ret)
				goto free_iommu;
		}
#endif
		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		if (!translation_pre_enabled(iommu))
			iommu_enable_translation(iommu);

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd) {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	kfree(g_iommus);

error:
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static unsigned long intel_alloc_iova(struct device *dev,
				      struct dmar_domain *domain,
				      unsigned long nrpages, uint64_t dma_mask)
{
	unsigned long iova_pfn = 0;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
	/* Ensure we reserve the whole size-aligned region */
	nrpages = __roundup_pow_of_two(nrpages);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
					   IOVA_PFN(DMA_BIT_MASK(32)), false);
		if (iova_pfn)
			return iova_pfn;
	}
	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
				   IOVA_PFN(dma_mask), true);
	if (unlikely(!iova_pfn)) {
		pr_err("Allocating %ld-page iova for %s failed\n",
		       nrpages, dev_name(dev));
		return 0;
	}

	return iova_pfn;
}

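/*
 * Return the domain for dev, allocating one if necessary and preparing
 * identity maps for any RMRR regions that name the device.
 */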
static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
	struct dmar_domain *domain, *tmp;
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i, ret;

	domain = find_domain(dev);
	if (domain)
		goto out;

	domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		goto out;

	/* We have a new domain - setup possible RMRRs for the device */
	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			if (i_dev != dev)
				continue;

			ret = domain_prepare_identity_map(dev, domain,
							  rmrr->base_address,
							  rmrr->end_address);
			if (ret)
				dev_err(dev, "Mapping reserved region failed\n");
		}
	}
	rcu_read_unlock();

	tmp = set_domain_for_dev(dev, domain);
	if (!tmp || domain != tmp) {
		domain_exit(domain);
		domain = tmp;
	}

out:
	if (!domain)
		pr_err("Allocating domain for %s failed\n", dev_name(dev));

	return domain;
}

/* Check if the dev needs to go through non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	int found;

	if (iommu_dummy(dev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
			return 1;
		else {
			/*
			 * A 32-bit DMA device is removed from si_domain and
			 * falls back to non-identity mapping.
			 */
			dmar_remove_one_dev_info(si_domain, dev);
			pr_info("32bit %s uses non-identity mapping\n",
				dev_name(dev));
			return 0;
		}
	} else {
		/*
		 * In case of a 64-bit DMA device detached from a VM, the
		 * device is put into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, dev);
			if (!ret) {
				pr_info("64bit %s uses identity mapping\n",
					dev_name(dev));
				return 1;
			}
		}
	}

	return 0;
}

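/*
 * Core DMA-mapping path: allocate an IOVA range, install page table
 * entries for it and flush the IOTLB (caching mode) or the write buffer.
 * Returns the resulting DMA address, or 0 on failure.
 */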
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	unsigned long iova_pfn;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova_pfn)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr ~ (paddr + size) might be a partial page; we should map the
	 * whole page.  Note: if two parts of one page are separately mapped,
	 * we might have two guest_addr mappings to the same host paddr, but
	 * this is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain,
				      mm_to_dma_pfn(iova_pfn),
				      size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova_pfn)
		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
		dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 unsigned long attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}

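/*
 * Tear down a DMA mapping: unmap the page tables and either flush the IOTLB
 * synchronously (intel_iommu_strict) or queue the IOVA and page list for
 * deferred, batched invalidation.
 */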
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	unsigned long nrpages;
	unsigned long iova_pfn;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova_pfn = IOVA_PFN(dev_addr);

	nrpages = aligned_nrpages(dev_addr, size);
	start_pfn = mm_to_dma_pfn(iova_pfn);
	last_pfn = start_pfn + nrpages - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
				      nrpages, !freelist, 0);
		/* free iova */
		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
		dma_free_pagelist(freelist);
	} else {
		queue_iova(&domain->iovad, iova_pfn, nrpages,
			   (unsigned long)freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	intel_unmap(dev, dev_addr, size);
}

David Woodhouse5040a912014-03-09 16:14:00 -07003705static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003706 dma_addr_t *dma_handle, gfp_t flags,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003707 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003708{
Akinobu Mita36746432014-06-04 16:06:51 -07003709 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003710 int order;
3711
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003712 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003713 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003714
David Woodhouse5040a912014-03-09 16:14:00 -07003715 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003716 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003717 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3718 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003719 flags |= GFP_DMA;
3720 else
3721 flags |= GFP_DMA32;
3722 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003723
Mel Gormand0164ad2015-11-06 16:28:21 -08003724 if (gfpflags_allow_blocking(flags)) {
Akinobu Mita36746432014-06-04 16:06:51 -07003725 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003726
Lucas Stach712c6042017-02-24 14:58:44 -08003727 page = dma_alloc_from_contiguous(dev, count, order, flags);
Akinobu Mita36746432014-06-04 16:06:51 -07003728 if (page && iommu_no_mapping(dev) &&
3729 page_to_phys(page) + size > dev->coherent_dma_mask) {
3730 dma_release_from_contiguous(dev, page, count);
3731 page = NULL;
3732 }
3733 }
3734
3735 if (!page)
3736 page = alloc_pages(flags, order);
3737 if (!page)
3738 return NULL;
3739 memset(page_address(page), 0, size);
3740
3741 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003742 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003743 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003744 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003745 return page_address(page);
3746 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3747 __free_pages(page, order);
3748
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003749 return NULL;
3750}
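
/*
 * Illustrative sketch, not authoritative: a driver reaching the allocator
 * above through the DMA API. When the device is translated by the IOMMU, the
 * GFP_DMA/GFP_DMA32 zone hints are dropped because any page can be remapped
 * below the coherent mask; in passthrough mode the zone restriction is kept
 * instead. "dev" is hypothetical.
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (cpu) {
 *		... use cpu/handle ...
 *		dma_free_coherent(dev, PAGE_SIZE, cpu, handle);
 *	}
 */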
3751
David Woodhouse5040a912014-03-09 16:14:00 -07003752static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003753 dma_addr_t dma_handle, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003754{
3755 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003756 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003757
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003758 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003759 order = get_order(size);
3760
Omer Peleg769530e2016-04-20 11:33:25 +03003761 intel_unmap(dev, dma_handle, size);
Akinobu Mita36746432014-06-04 16:06:51 -07003762 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3763 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003764}
3765
David Woodhouse5040a912014-03-09 16:14:00 -07003766static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003767 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003768 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003769{
Omer Peleg769530e2016-04-20 11:33:25 +03003770 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3771 unsigned long nrpages = 0;
3772 struct scatterlist *sg;
3773 int i;
3774
3775 for_each_sg(sglist, sg, nelems, i) {
3776 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3777 }
3778
3779 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003780}
3781
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003782static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003783 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003784{
3785 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003786 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003787
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003788 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003789 BUG_ON(!sg_page(sg));
Dan Williams3e6110f2015-12-15 12:54:06 -08003790 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003791 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003792 }
3793 return nelems;
3794}
3795
David Woodhouse5040a912014-03-09 16:14:00 -07003796static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003797 enum dma_data_direction dir, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003798{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003799 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003800 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003801 size_t size = 0;
3802 int prot = 0;
Omer Peleg2aac6302016-04-20 11:33:57 +03003803 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003804 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003805 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003806 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003807 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003808
3809 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003810 if (iommu_no_mapping(dev))
3811 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003812
David Woodhouse5040a912014-03-09 16:14:00 -07003813 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003814 if (!domain)
3815 return 0;
3816
Weidong Han8c11e792008-12-08 15:29:22 +08003817 iommu = domain_get_iommu(domain);
3818
David Woodhouseb536d242009-06-28 14:49:31 +01003819 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003820 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003821
Omer Peleg2aac6302016-04-20 11:33:57 +03003822 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
David Woodhouse5040a912014-03-09 16:14:00 -07003823 *dev->dma_mask);
Omer Peleg2aac6302016-04-20 11:33:57 +03003824 if (!iova_pfn) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003825 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003826 return 0;
3827 }
3828
3829 /*
3830	 * Check if DMAR supports zero-length reads on write-only
3831	 * mappings.
3832	 */
3833	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003834 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003835 prot |= DMA_PTE_READ;
3836 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3837 prot |= DMA_PTE_WRITE;
3838
Omer Peleg2aac6302016-04-20 11:33:57 +03003839 start_vpfn = mm_to_dma_pfn(iova_pfn);
David Woodhousee1605492009-06-29 11:17:38 +01003840
Fenghua Yuf5329592009-08-04 15:09:37 -07003841 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003842 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003843 dma_pte_free_pagetable(domain, start_vpfn,
David Dillowbc24c572017-06-28 19:42:23 -07003844 start_vpfn + size - 1,
3845 agaw_to_level(domain->agaw) + 1);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003846 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
David Woodhousee1605492009-06-29 11:17:38 +01003847 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003848 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003849
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003850 /* it's a non-present to present mapping. Only flush if caching mode */
3851 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003852 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003853 else
Weidong Han8c11e792008-12-08 15:29:22 +08003854 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003855
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003856 return nelems;
3857}
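
/*
 * Sketch (assumptions flagged): intel_map_sg() is reached via the generic
 * dma_map_sg() helper. All entries of the scatterlist share one contiguous
 * IOVA allocation above, so a failure maps nothing. "dev", "sgl" and "nents"
 * are hypothetical.
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;	// nothing was mapped
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */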
3858
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003859static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3860{
3861 return !dma_addr;
3862}
3863
Arvind Yadav01e19322017-06-28 16:39:32 +05303864const struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003865 .alloc = intel_alloc_coherent,
3866 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003867 .map_sg = intel_map_sg,
3868 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003869 .map_page = intel_map_page,
3870 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003871 .mapping_error = intel_mapping_error,
Christoph Hellwig5860acc2017-05-22 11:38:27 +02003872#ifdef CONFIG_X86
3873 .dma_supported = x86_dma_supported,
3874#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003875};
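
/*
 * Note: this dma_map_ops instance is installed as the global "dma_ops" in
 * intel_iommu_init() below, so generic DMA API calls from drivers are
 * dispatched here, e.g. dma_map_page() -> get_dma_ops(dev)->map_page() ->
 * intel_map_page(). It is a single shared const structure; nothing is
 * allocated per device.
 */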
3876
3877static inline int iommu_domain_cache_init(void)
3878{
3879 int ret = 0;
3880
3881 iommu_domain_cache = kmem_cache_create("iommu_domain",
3882 sizeof(struct dmar_domain),
3883 0,
3884					 SLAB_HWCACHE_ALIGN,
3886					 NULL);
3887 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003888 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003889 ret = -ENOMEM;
3890 }
3891
3892 return ret;
3893}
3894
3895static inline int iommu_devinfo_cache_init(void)
3896{
3897 int ret = 0;
3898
3899 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3900 sizeof(struct device_domain_info),
3901 0,
3902 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003903 NULL);
3904 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003905 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003906 ret = -ENOMEM;
3907 }
3908
3909 return ret;
3910}
3911
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003912static int __init iommu_init_mempool(void)
3913{
3914 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003915 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003916 if (ret)
3917 return ret;
3918
3919 ret = iommu_domain_cache_init();
3920 if (ret)
3921 goto domain_error;
3922
3923 ret = iommu_devinfo_cache_init();
3924 if (!ret)
3925 return ret;
3926
3927 kmem_cache_destroy(iommu_domain_cache);
3928domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003929 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003930
3931 return -ENOMEM;
3932}
3933
3934static void __init iommu_exit_mempool(void)
3935{
3936 kmem_cache_destroy(iommu_devinfo_cache);
3937 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003938 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003939}
3940
Dan Williams556ab452010-07-23 15:47:56 -07003941static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3942{
3943 struct dmar_drhd_unit *drhd;
3944 u32 vtbar;
3945 int rc;
3946
3947	/* We know that this device on this chipset has its own IOMMU.
3948	 * If we find it under a different IOMMU, then the BIOS is lying
3949	 * to us. Hope that the IOMMU for this device is actually
3950	 * disabled, and that it needs no translation.
3951	 */
3952 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3953 if (rc) {
3954 /* "can't" happen */
3955 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3956 return;
3957 }
3958 vtbar &= 0xffff0000;
3959
3960	/* we know that this IOMMU should be at offset 0xa000 from vtbar */
3961 drhd = dmar_find_matched_drhd_unit(pdev);
3962 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3963 TAINT_FIRMWARE_WORKAROUND,
3964 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3965 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3966}
3967DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
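
/*
 * The DECLARE_PCI_FIXUP_ENABLE() above registers the quirk so that it runs
 * whenever a device matching vendor 0x8086 and the SNB QuickData device ID
 * is enabled; the workaround only marks the device with a dummy archdata so
 * the rest of the driver treats it as untranslated.
 */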
3968
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003969static void __init init_no_remapping_devices(void)
3970{
3971 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003972 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003973 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003974
3975 for_each_drhd_unit(drhd) {
3976 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003977 for_each_active_dev_scope(drhd->devices,
3978 drhd->devices_cnt, i, dev)
3979 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003980 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003981 if (i == drhd->devices_cnt)
3982 drhd->ignored = 1;
3983 }
3984 }
3985
Jiang Liu7c919772014-01-06 14:18:18 +08003986 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003987 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003988 continue;
3989
Jiang Liub683b232014-02-19 14:07:32 +08003990 for_each_active_dev_scope(drhd->devices,
3991 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003992 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003993 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003994 if (i < drhd->devices_cnt)
3995 continue;
3996
David Woodhousec0771df2011-10-14 20:59:46 +01003997 /* This IOMMU has *only* gfx devices. Either bypass it or
3998 set the gfx_mapped flag, as appropriate */
3999 if (dmar_map_gfx) {
4000 intel_iommu_gfx_mapped = 1;
4001 } else {
4002 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08004003 for_each_active_dev_scope(drhd->devices,
4004 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004005 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004006 }
4007 }
4008}
4009
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004010#ifdef CONFIG_SUSPEND
4011static int init_iommu_hw(void)
4012{
4013 struct dmar_drhd_unit *drhd;
4014 struct intel_iommu *iommu = NULL;
4015
4016 for_each_active_iommu(iommu, drhd)
4017 if (iommu->qi)
4018 dmar_reenable_qi(iommu);
4019
Joseph Cihulab7792602011-05-03 00:08:37 -07004020 for_each_iommu(iommu, drhd) {
4021 if (drhd->ignored) {
4022 /*
4023 * we always have to disable PMRs or DMA may fail on
4024 * this device
4025 */
4026 if (force_on)
4027 iommu_disable_protect_mem_regions(iommu);
4028 continue;
4029 }
4030
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004031 iommu_flush_write_buffer(iommu);
4032
4033 iommu_set_root_entry(iommu);
4034
4035 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004036 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08004037 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4038 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07004039 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004040 }
4041
4042 return 0;
4043}
4044
4045static void iommu_flush_all(void)
4046{
4047 struct dmar_drhd_unit *drhd;
4048 struct intel_iommu *iommu;
4049
4050 for_each_active_iommu(iommu, drhd) {
4051 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004052 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004053 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004054 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004055 }
4056}
4057
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004058static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004059{
4060 struct dmar_drhd_unit *drhd;
4061 struct intel_iommu *iommu = NULL;
4062 unsigned long flag;
4063
4064 for_each_active_iommu(iommu, drhd) {
4065 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4066 GFP_ATOMIC);
4067 if (!iommu->iommu_state)
4068 goto nomem;
4069 }
4070
4071 iommu_flush_all();
4072
4073 for_each_active_iommu(iommu, drhd) {
4074 iommu_disable_translation(iommu);
4075
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004076 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004077
4078 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4079 readl(iommu->reg + DMAR_FECTL_REG);
4080 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4081 readl(iommu->reg + DMAR_FEDATA_REG);
4082 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4083 readl(iommu->reg + DMAR_FEADDR_REG);
4084 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4085 readl(iommu->reg + DMAR_FEUADDR_REG);
4086
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004087 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004088 }
4089 return 0;
4090
4091nomem:
4092 for_each_active_iommu(iommu, drhd)
4093 kfree(iommu->iommu_state);
4094
4095 return -ENOMEM;
4096}
4097
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004098static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004099{
4100 struct dmar_drhd_unit *drhd;
4101 struct intel_iommu *iommu = NULL;
4102 unsigned long flag;
4103
4104 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004105 if (force_on)
4106			panic("tboot: IOMMU setup failed, DMAR cannot resume!\n");
4107		else
4108			WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004109 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004110 }
4111
4112	for_each_active_iommu(iommu, drhd) {
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004114 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004115
4116 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4117 iommu->reg + DMAR_FECTL_REG);
4118 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4119 iommu->reg + DMAR_FEDATA_REG);
4120 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4121 iommu->reg + DMAR_FEADDR_REG);
4122 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4123 iommu->reg + DMAR_FEUADDR_REG);
4124
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004125 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004126 }
4127
4128 for_each_active_iommu(iommu, drhd)
4129 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004130}
4131
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004132static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004133 .resume = iommu_resume,
4134 .suspend = iommu_suspend,
4135};
4136
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004137static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004138{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004139 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004140}
4141
4142#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004143static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004144#endif /* CONFIG_PM */
4145
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004146
Jiang Liuc2a0b532014-11-09 22:47:56 +08004147int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004148{
4149 struct acpi_dmar_reserved_memory *rmrr;
Eric Auger0659b8d2017-01-19 20:57:53 +00004150 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004151 struct dmar_rmrr_unit *rmrru;
Eric Auger0659b8d2017-01-19 20:57:53 +00004152 size_t length;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004153
4154 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4155 if (!rmrru)
Eric Auger0659b8d2017-01-19 20:57:53 +00004156 goto out;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004157
4158 rmrru->hdr = header;
4159 rmrr = (struct acpi_dmar_reserved_memory *)header;
4160 rmrru->base_address = rmrr->base_address;
4161 rmrru->end_address = rmrr->end_address;
Eric Auger0659b8d2017-01-19 20:57:53 +00004162
4163 length = rmrr->end_address - rmrr->base_address + 1;
4164 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4165 IOMMU_RESV_DIRECT);
4166 if (!rmrru->resv)
4167 goto free_rmrru;
4168
Jiang Liu2e455282014-02-19 14:07:36 +08004169 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4170 ((void *)rmrr) + rmrr->header.length,
4171 &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004172 if (rmrru->devices_cnt && rmrru->devices == NULL)
4173 goto free_all;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004174
Jiang Liu2e455282014-02-19 14:07:36 +08004175 list_add(&rmrru->list, &dmar_rmrr_units);
4176
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004177 return 0;
Eric Auger0659b8d2017-01-19 20:57:53 +00004178free_all:
4179 kfree(rmrru->resv);
4180free_rmrru:
4181 kfree(rmrru);
4182out:
4183 return -ENOMEM;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004184}
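
/*
 * Hedged note: the IOMMU_RESV_DIRECT region allocated above is what later
 * shows up through the generic reserved-region interface, e.g. in
 * /sys/kernel/iommu_groups/<n>/reserved_regions as lines of the form
 * "0x000e8000 0x000e8fff direct", so IOVA allocators know to steer clear of
 * RMRR ranges.
 */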
4185
Jiang Liu6b197242014-11-09 22:47:58 +08004186static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4187{
4188 struct dmar_atsr_unit *atsru;
4189 struct acpi_dmar_atsr *tmp;
4190
4191 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4192 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4193 if (atsr->segment != tmp->segment)
4194 continue;
4195 if (atsr->header.length != tmp->header.length)
4196 continue;
4197 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4198 return atsru;
4199 }
4200
4201 return NULL;
4202}
4203
4204int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004205{
4206 struct acpi_dmar_atsr *atsr;
4207 struct dmar_atsr_unit *atsru;
4208
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004209 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
Jiang Liu6b197242014-11-09 22:47:58 +08004210 return 0;
4211
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004212 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004213 atsru = dmar_find_atsr(atsr);
4214 if (atsru)
4215 return 0;
4216
4217 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004218 if (!atsru)
4219 return -ENOMEM;
4220
Jiang Liu6b197242014-11-09 22:47:58 +08004221 /*
4222 * If memory is allocated from slab by ACPI _DSM method, we need to
4223 * copy the memory content because the memory buffer will be freed
4224 * on return.
4225 */
4226 atsru->hdr = (void *)(atsru + 1);
4227 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004228 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004229 if (!atsru->include_all) {
4230 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4231 (void *)atsr + atsr->header.length,
4232 &atsru->devices_cnt);
4233 if (atsru->devices_cnt && atsru->devices == NULL) {
4234 kfree(atsru);
4235 return -ENOMEM;
4236 }
4237 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004238
Jiang Liu0e242612014-02-19 14:07:34 +08004239 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004240
4241 return 0;
4242}
4243
Jiang Liu9bdc5312014-01-06 14:18:27 +08004244static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4245{
4246 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4247 kfree(atsru);
4248}
4249
Jiang Liu6b197242014-11-09 22:47:58 +08004250int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4251{
4252 struct acpi_dmar_atsr *atsr;
4253 struct dmar_atsr_unit *atsru;
4254
4255 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4256 atsru = dmar_find_atsr(atsr);
4257 if (atsru) {
4258 list_del_rcu(&atsru->list);
4259 synchronize_rcu();
4260 intel_iommu_free_atsr(atsru);
4261 }
4262
4263 return 0;
4264}
4265
4266int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4267{
4268 int i;
4269 struct device *dev;
4270 struct acpi_dmar_atsr *atsr;
4271 struct dmar_atsr_unit *atsru;
4272
4273 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4274 atsru = dmar_find_atsr(atsr);
4275 if (!atsru)
4276 return 0;
4277
Linus Torvalds194dc872016-07-27 20:03:31 -07004278 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08004279 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4280 i, dev)
4281 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07004282 }
Jiang Liu6b197242014-11-09 22:47:58 +08004283
4284 return 0;
4285}
4286
Jiang Liuffebeb42014-11-09 22:48:02 +08004287static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4288{
4289 int sp, ret = 0;
4290 struct intel_iommu *iommu = dmaru->iommu;
4291
4292 if (g_iommus[iommu->seq_id])
4293 return 0;
4294
4295 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004296 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004297 iommu->name);
4298 return -ENXIO;
4299 }
4300 if (!ecap_sc_support(iommu->ecap) &&
4301 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004302 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004303 iommu->name);
4304 return -ENXIO;
4305 }
4306 sp = domain_update_iommu_superpage(iommu) - 1;
4307 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004308 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004309 iommu->name);
4310 return -ENXIO;
4311 }
4312
4313 /*
4314 * Disable translation if already enabled prior to OS handover.
4315 */
4316 if (iommu->gcmd & DMA_GCMD_TE)
4317 iommu_disable_translation(iommu);
4318
4319 g_iommus[iommu->seq_id] = iommu;
4320 ret = iommu_init_domains(iommu);
4321 if (ret == 0)
4322 ret = iommu_alloc_root_entry(iommu);
4323 if (ret)
4324 goto out;
4325
David Woodhouse8a94ade2015-03-24 14:54:56 +00004326#ifdef CONFIG_INTEL_IOMMU_SVM
4327 if (pasid_enabled(iommu))
4328 intel_svm_alloc_pasid_tables(iommu);
4329#endif
4330
Jiang Liuffebeb42014-11-09 22:48:02 +08004331 if (dmaru->ignored) {
4332 /*
4333 * we always have to disable PMRs or DMA may fail on this device
4334 */
4335 if (force_on)
4336 iommu_disable_protect_mem_regions(iommu);
4337 return 0;
4338 }
4339
4340 intel_iommu_init_qi(iommu);
4341 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004342
4343#ifdef CONFIG_INTEL_IOMMU_SVM
4344 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4345 ret = intel_svm_enable_prq(iommu);
4346 if (ret)
4347 goto disable_iommu;
4348 }
4349#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004350 ret = dmar_set_interrupt(iommu);
4351 if (ret)
4352 goto disable_iommu;
4353
4354 iommu_set_root_entry(iommu);
4355 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4356 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4357 iommu_enable_translation(iommu);
4358
Jiang Liuffebeb42014-11-09 22:48:02 +08004359 iommu_disable_protect_mem_regions(iommu);
4360 return 0;
4361
4362disable_iommu:
4363 disable_dmar_iommu(iommu);
4364out:
4365 free_dmar_iommu(iommu);
4366 return ret;
4367}
4368
Jiang Liu6b197242014-11-09 22:47:58 +08004369int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4370{
Jiang Liuffebeb42014-11-09 22:48:02 +08004371 int ret = 0;
4372 struct intel_iommu *iommu = dmaru->iommu;
4373
4374 if (!intel_iommu_enabled)
4375 return 0;
4376 if (iommu == NULL)
4377 return -EINVAL;
4378
4379 if (insert) {
4380 ret = intel_iommu_add(dmaru);
4381 } else {
4382 disable_dmar_iommu(iommu);
4383 free_dmar_iommu(iommu);
4384 }
4385
4386 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004387}
4388
Jiang Liu9bdc5312014-01-06 14:18:27 +08004389static void intel_iommu_free_dmars(void)
4390{
4391 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4392 struct dmar_atsr_unit *atsru, *atsr_n;
4393
4394 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4395 list_del(&rmrru->list);
4396 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004397 kfree(rmrru->resv);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004398 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004399 }
4400
Jiang Liu9bdc5312014-01-06 14:18:27 +08004401 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4402 list_del(&atsru->list);
4403 intel_iommu_free_atsr(atsru);
4404 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004405}
4406
4407int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4408{
Jiang Liub683b232014-02-19 14:07:32 +08004409 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004410 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004411 struct pci_dev *bridge = NULL;
4412 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004413 struct acpi_dmar_atsr *atsr;
4414 struct dmar_atsr_unit *atsru;
4415
4416 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004417 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004418 bridge = bus->self;
David Woodhoused14053b32015-10-15 09:28:06 +01004419 /* If it's an integrated device, allow ATS */
4420 if (!bridge)
4421 return 1;
4422 /* Connected via non-PCIe: no ATS */
4423 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004424 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004425 return 0;
David Woodhoused14053b32015-10-15 09:28:06 +01004426 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004427 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004428 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004429 }
4430
Jiang Liu0e242612014-02-19 14:07:34 +08004431 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004432 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4433 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4434 if (atsr->segment != pci_domain_nr(dev->bus))
4435 continue;
4436
Jiang Liub683b232014-02-19 14:07:32 +08004437 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004438 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004439 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004440
4441 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004442 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004443 }
Jiang Liub683b232014-02-19 14:07:32 +08004444 ret = 0;
4445out:
Jiang Liu0e242612014-02-19 14:07:34 +08004446 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004447
Jiang Liub683b232014-02-19 14:07:32 +08004448 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004449}
4450
Jiang Liu59ce0512014-02-19 14:07:35 +08004451int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4452{
4453 int ret = 0;
4454 struct dmar_rmrr_unit *rmrru;
4455 struct dmar_atsr_unit *atsru;
4456 struct acpi_dmar_atsr *atsr;
4457 struct acpi_dmar_reserved_memory *rmrr;
4458
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004459 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
Jiang Liu59ce0512014-02-19 14:07:35 +08004460 return 0;
4461
4462 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4463 rmrr = container_of(rmrru->hdr,
4464 struct acpi_dmar_reserved_memory, header);
4465 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4466 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4467 ((void *)rmrr) + rmrr->header.length,
4468 rmrr->segment, rmrru->devices,
4469 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004470			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004471 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004472 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004473 dmar_remove_dev_scope(info, rmrr->segment,
4474 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004475 }
4476 }
4477
4478 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4479 if (atsru->include_all)
4480 continue;
4481
4482 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4483 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4484 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4485 (void *)atsr + atsr->header.length,
4486 atsr->segment, atsru->devices,
4487 atsru->devices_cnt);
4488 if (ret > 0)
4489 break;
4490			else if (ret < 0)
4491 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004492 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004493 if (dmar_remove_dev_scope(info, atsr->segment,
4494 atsru->devices, atsru->devices_cnt))
4495 break;
4496 }
4497 }
4498
4499 return 0;
4500}
4501
Fenghua Yu99dcade2009-11-11 07:23:06 -08004502/*
4503 * Here we only respond to a device being unbound from its driver.
4504 *
4505 * A newly added device is not attached to its DMAR domain here yet. That
4506 * happens when the device is first mapped to an iova.
4507 */
4508static int device_notifier(struct notifier_block *nb,
4509 unsigned long action, void *data)
4510{
4511 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004512 struct dmar_domain *domain;
4513
David Woodhouse3d891942014-03-06 15:59:26 +00004514 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004515 return 0;
4516
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004517 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004518 return 0;
4519
David Woodhouse1525a292014-03-06 16:19:30 +00004520 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004521 if (!domain)
4522 return 0;
4523
Joerg Roedele6de0f82015-07-22 16:30:36 +02004524 dmar_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004525 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004526 domain_exit(domain);
Alex Williamsona97590e2011-03-04 14:52:16 -07004527
Fenghua Yu99dcade2009-11-11 07:23:06 -08004528 return 0;
4529}
4530
4531static struct notifier_block device_nb = {
4532 .notifier_call = device_notifier,
4533};
4534
Jiang Liu75f05562014-02-19 14:07:37 +08004535static int intel_iommu_memory_notifier(struct notifier_block *nb,
4536 unsigned long val, void *v)
4537{
4538 struct memory_notify *mhp = v;
4539 unsigned long long start, end;
4540 unsigned long start_vpfn, last_vpfn;
4541
4542 switch (val) {
4543 case MEM_GOING_ONLINE:
4544 start = mhp->start_pfn << PAGE_SHIFT;
4545 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4546 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004547 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004548 start, end);
4549 return NOTIFY_BAD;
4550 }
4551 break;
4552
4553 case MEM_OFFLINE:
4554 case MEM_CANCEL_ONLINE:
4555 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4556 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4557 while (start_vpfn <= last_vpfn) {
4558 struct iova *iova;
4559 struct dmar_drhd_unit *drhd;
4560 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004561 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004562
4563 iova = find_iova(&si_domain->iovad, start_vpfn);
4564 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004565 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004566 start_vpfn);
4567 break;
4568 }
4569
4570 iova = split_and_remove_iova(&si_domain->iovad, iova,
4571 start_vpfn, last_vpfn);
4572 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004573 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004574 start_vpfn, last_vpfn);
4575 return NOTIFY_BAD;
4576 }
4577
David Woodhouseea8ea462014-03-05 17:09:32 +00004578 freelist = domain_unmap(si_domain, iova->pfn_lo,
4579 iova->pfn_hi);
4580
Jiang Liu75f05562014-02-19 14:07:37 +08004581 rcu_read_lock();
4582 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004583 iommu_flush_iotlb_psi(iommu, si_domain,
Jiang Liua156ef92014-07-11 14:19:36 +08004584 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004585 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004586 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004587 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004588
4589 start_vpfn = iova->pfn_hi + 1;
4590 free_iova_mem(iova);
4591 }
4592 break;
4593 }
4594
4595 return NOTIFY_OK;
4596}
4597
4598static struct notifier_block intel_iommu_memory_nb = {
4599 .notifier_call = intel_iommu_memory_notifier,
4600 .priority = 0
4601};
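
/*
 * Sketch of the flow above for identity-mapped (si_domain) setups: when a
 * range [start_pfn, start_pfn + nr_pages) is onlined, a 1:1 map is added so
 * that bus address == physical address for the new memory; on offline the
 * same range is carved out of the IOVA tree, unmapped, flushed from every
 * IOMMU's IOTLB, and its page-table pages are freed.
 */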
4602
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004603static void free_all_cpu_cached_iovas(unsigned int cpu)
4604{
4605 int i;
4606
4607 for (i = 0; i < g_num_of_iommus; i++) {
4608 struct intel_iommu *iommu = g_iommus[i];
4609 struct dmar_domain *domain;
Aaron Campbell0caa7612016-07-02 21:23:24 -03004610 int did;
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004611
4612 if (!iommu)
4613 continue;
4614
Jan Niehusmann3bd4f912016-06-06 14:20:11 +02004615 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
Aaron Campbell0caa7612016-07-02 21:23:24 -03004616 domain = get_iommu_domain(iommu, (u16)did);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004617
4618 if (!domain)
4619 continue;
4620 free_cpu_cached_iovas(cpu, &domain->iovad);
4621 }
4622 }
4623}
4624
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004625static int intel_iommu_cpu_dead(unsigned int cpu)
Omer Pelegaa473242016-04-20 11:33:02 +03004626{
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004627 free_all_cpu_cached_iovas(cpu);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004628 return 0;
Omer Pelegaa473242016-04-20 11:33:02 +03004629}
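
/*
 * This is wired up via cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, ...) in
 * intel_iommu_init() with a NULL startup callback: nothing needs doing when
 * a CPU comes online, only its per-cpu IOVA caches must be drained once it
 * is dead.
 */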
4630
Joerg Roedel161b28a2017-03-28 17:04:52 +02004631static void intel_disable_iommus(void)
4632{
4633 struct intel_iommu *iommu = NULL;
4634 struct dmar_drhd_unit *drhd;
4635
4636 for_each_iommu(iommu, drhd)
4637 iommu_disable_translation(iommu);
4638}
4639
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004640static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4641{
Joerg Roedel2926a2aa2017-08-14 17:19:26 +02004642 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4643
4644 return container_of(iommu_dev, struct intel_iommu, iommu);
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004645}
4646
Alex Williamsona5459cf2014-06-12 16:12:31 -06004647static ssize_t intel_iommu_show_version(struct device *dev,
4648 struct device_attribute *attr,
4649 char *buf)
4650{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004651 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004652 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4653 return sprintf(buf, "%d:%d\n",
4654 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4655}
4656static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4657
4658static ssize_t intel_iommu_show_address(struct device *dev,
4659 struct device_attribute *attr,
4660 char *buf)
4661{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004662 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004663 return sprintf(buf, "%llx\n", iommu->reg_phys);
4664}
4665static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4666
4667static ssize_t intel_iommu_show_cap(struct device *dev,
4668 struct device_attribute *attr,
4669 char *buf)
4670{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004671 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004672 return sprintf(buf, "%llx\n", iommu->cap);
4673}
4674static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4675
4676static ssize_t intel_iommu_show_ecap(struct device *dev,
4677 struct device_attribute *attr,
4678 char *buf)
4679{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004680 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004681 return sprintf(buf, "%llx\n", iommu->ecap);
4682}
4683static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4684
Alex Williamson2238c082015-07-14 15:24:53 -06004685static ssize_t intel_iommu_show_ndoms(struct device *dev,
4686 struct device_attribute *attr,
4687 char *buf)
4688{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004689 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004690 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4691}
4692static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4693
4694static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4695 struct device_attribute *attr,
4696 char *buf)
4697{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004698 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004699 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4700 cap_ndoms(iommu->cap)));
4701}
4702static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4703
Alex Williamsona5459cf2014-06-12 16:12:31 -06004704static struct attribute *intel_iommu_attrs[] = {
4705 &dev_attr_version.attr,
4706 &dev_attr_address.attr,
4707 &dev_attr_cap.attr,
4708 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004709 &dev_attr_domains_supported.attr,
4710 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004711 NULL,
4712};
4713
4714static struct attribute_group intel_iommu_group = {
4715 .name = "intel-iommu",
4716 .attrs = intel_iommu_attrs,
4717};
4718
4719const struct attribute_group *intel_iommu_groups[] = {
4720 &intel_iommu_group,
4721 NULL,
4722};
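
/*
 * Illustrative sysfs view (paths are an assumption based on the group name
 * above): once iommu_device_sysfs_add() runs in intel_iommu_init(), the
 * attributes appear roughly as
 *	/sys/class/iommu/dmar0/intel-iommu/version
 *	/sys/class/iommu/dmar0/intel-iommu/cap
 *	/sys/class/iommu/dmar0/intel-iommu/domains_used
 * and can simply be read with cat(1).
 */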
4723
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004724int __init intel_iommu_init(void)
4725{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004726 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004727 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004728 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004729
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004730 /* VT-d is required for a TXT/tboot launch, so enforce that */
4731 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004732
Jiang Liu3a5670e2014-02-19 14:07:33 +08004733 if (iommu_init_mempool()) {
4734 if (force_on)
4735 panic("tboot: Failed to initialize iommu memory\n");
4736 return -ENOMEM;
4737 }
4738
4739 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004740 if (dmar_table_init()) {
4741 if (force_on)
4742 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004743 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004744 }
4745
Suresh Siddhac2c72862011-08-23 17:05:19 -07004746 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004747 if (force_on)
4748 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004749 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004750 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004751
Joerg Roedelec154bf2017-10-06 15:00:53 +02004752 up_write(&dmar_global_lock);
4753
4754 /*
4755 * The bus notifier takes the dmar_global_lock, so lockdep will
4756 * complain later when we register it under the lock.
4757 */
4758 dmar_register_bus_notifier();
4759
4760 down_write(&dmar_global_lock);
4761
Joerg Roedel161b28a2017-03-28 17:04:52 +02004762 if (no_iommu || dmar_disabled) {
4763 /*
Shaohua Libfd20f12017-04-26 09:18:35 -07004764		 * We exit the function here to ensure the IOMMU's remapping and
4765		 * mempool aren't set up, which means that the IOMMU's PMRs
4766		 * won't be disabled via the call to init_dmars(). So disable
4767		 * them explicitly here. The PMRs were set up by tboot prior to
4768		 * calling SENTER, but the kernel is expected to reset/tear
4769		 * down the PMRs.
4770 */
4771 if (intel_iommu_tboot_noforce) {
4772 for_each_iommu(iommu, drhd)
4773 iommu_disable_protect_mem_regions(iommu);
4774 }
4775
4776 /*
Joerg Roedel161b28a2017-03-28 17:04:52 +02004777 * Make sure the IOMMUs are switched off, even when we
4778 * boot into a kexec kernel and the previous kernel left
4779 * them enabled
4780 */
4781 intel_disable_iommus();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004782 goto out_free_dmar;
Joerg Roedel161b28a2017-03-28 17:04:52 +02004783 }
Suresh Siddha2ae21012008-07-10 11:16:43 -07004784
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004785 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004786 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004787
4788 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004789 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004790
Joseph Cihula51a63e62011-03-21 11:04:24 -07004791 if (dmar_init_reserved_ranges()) {
4792 if (force_on)
4793 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004794 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004795 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004796
4797 init_no_remapping_devices();
4798
Joseph Cihulab7792602011-05-03 00:08:37 -07004799 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004800 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004801 if (force_on)
4802 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004803 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004804 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004805 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004806 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004807 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004808
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004809#ifdef CONFIG_SWIOTLB
4810 swiotlb = 0;
4811#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004812 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004813
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004814 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004815
Joerg Roedel39ab9552017-02-01 16:56:46 +01004816 for_each_active_iommu(iommu, drhd) {
4817 iommu_device_sysfs_add(&iommu->iommu, NULL,
4818 intel_iommu_groups,
4819 "%s", iommu->name);
4820 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4821 iommu_device_register(&iommu->iommu);
4822 }
Alex Williamsona5459cf2014-06-12 16:12:31 -06004823
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004824 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004825 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004826 if (si_domain && !hw_pass_through)
4827 register_memory_notifier(&intel_iommu_memory_nb);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004828 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4829 intel_iommu_cpu_dead);
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004830 intel_iommu_enabled = 1;
4831
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004832 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004833
4834out_free_reserved_range:
4835 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004836out_free_dmar:
4837 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004838 up_write(&dmar_global_lock);
4839 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004840 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004841}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004842
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004843static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
Alex Williamson579305f2014-07-03 09:51:43 -06004844{
4845 struct intel_iommu *iommu = opaque;
4846
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004847 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06004848 return 0;
4849}
4850
4851/*
4852 * NB - intel-iommu lacks any sort of reference counting for the users of
4853 * dependent devices. If multiple endpoints have intersecting dependent
4854 * devices, unbinding the driver from any one of them will possibly leave
4855 * the others unable to operate.
4856 */
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004857static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004858{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004859 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004860 return;
4861
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004862 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004863}
4864
Joerg Roedel127c7612015-07-23 17:44:46 +02004865static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004866{
Weidong Hanc7151a82008-12-08 22:51:37 +08004867 struct intel_iommu *iommu;
4868 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08004869
Joerg Roedel55d94042015-07-22 16:50:40 +02004870 assert_spin_locked(&device_domain_lock);
4871
Joerg Roedelb608ac32015-07-21 18:19:08 +02004872 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08004873 return;
4874
Joerg Roedel127c7612015-07-23 17:44:46 +02004875 iommu = info->iommu;
4876
4877 if (info->dev) {
4878 iommu_disable_dev_iotlb(info);
4879 domain_context_clear(iommu, info->dev);
4880 }
4881
Joerg Roedelb608ac32015-07-21 18:19:08 +02004882 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07004883
Joerg Roedeld160aca2015-07-22 11:52:53 +02004884 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004885 domain_detach_iommu(info->domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004886 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004887
4888 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004889}
4890
Joerg Roedel55d94042015-07-22 16:50:40 +02004891static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4892 struct device *dev)
4893{
Joerg Roedel127c7612015-07-23 17:44:46 +02004894 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02004895 unsigned long flags;
4896
Weidong Hanc7151a82008-12-08 22:51:37 +08004897 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004898 info = dev->archdata.iommu;
4899 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004900 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004901}
4902
4903static int md_domain_init(struct dmar_domain *domain, int guest_width)
4904{
4905 int adjust_width;
4906
Zhen Leiaa3ac942017-09-21 16:52:45 +01004907 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004908 domain_reserve_special_ranges(domain);
4909
4910 /* calculate AGAW */
4911 domain->gaw = guest_width;
4912 adjust_width = guestwidth_to_adjustwidth(guest_width);
4913 domain->agaw = width_to_agaw(adjust_width);
4914
Weidong Han5e98c4b2008-12-08 23:03:27 +08004915 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004916 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004917 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004918 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004919
4920 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004921 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004922 if (!domain->pgd)
4923 return -ENOMEM;
4924 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4925 return 0;
4926}
4927
Joerg Roedel00a77de2015-03-26 13:43:08 +01004928static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004929{
Joerg Roedel5d450802008-12-03 14:52:32 +01004930 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004931 struct iommu_domain *domain;
4932
4933 if (type != IOMMU_DOMAIN_UNMANAGED)
4934 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004935
Jiang Liuab8dfe22014-07-11 14:19:27 +08004936 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004937 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004938 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004939 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004940 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004941 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004942 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004943 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004944 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004945 }
Allen Kay8140a952011-10-14 12:32:17 -07004946 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004947
Joerg Roedel00a77de2015-03-26 13:43:08 +01004948 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004949 domain->geometry.aperture_start = 0;
4950 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4951 domain->geometry.force_aperture = true;
4952
Joerg Roedel00a77de2015-03-26 13:43:08 +01004953 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004954}
Kay, Allen M38717942008-09-09 18:37:29 +03004955
Joerg Roedel00a77de2015-03-26 13:43:08 +01004956static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004957{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004958 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004959}
Kay, Allen M38717942008-09-09 18:37:29 +03004960
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			rcu_read_lock();
			dmar_remove_one_dev_info(old_domain, dev);
			rcu_read_unlock();

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}

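/*
 * Map @size bytes at @iova to @hpa.  A caller typically goes through the
 * generic API; the sketch below is illustrative only, with made-up
 * addresses:
 *
 *	iommu_map(dom, 0x100000, page_to_phys(pg), PAGE_SIZE,
 *		  IOMMU_READ | IOMMU_WRITE);
 *
 * IOMMU_* prot flags are translated to DMA_PTE_* bits here; IOMMU_CACHE
 * only takes effect if every IOMMU in the domain supports snoop control.
 */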
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

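/*
 * Unmap at least @size bytes at @iova and return how much was actually
 * unmapped: if @iova sits inside a large-page mapping, the whole large
 * page goes away, so the return value can exceed @size.  The IOTLB of
 * every IOMMU serving this domain is flushed before the page list is
 * freed.
 */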
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_domain_iommu(iommu_id, dmar_domain) {
		iommu = g_iommus[iommu_id];

		iommu_flush_iotlb_psi(iommu, dmar_domain,
				      start_pfn, npages, !freelist, 0);
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

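/*
 * Note: this returns the physical address recorded in the PTE, i.e. the
 * (possibly large) page frame base; the offset of @iova within that page
 * is not added back in.
 */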
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

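/*
 * Called for each device the core discovers: link it to its IOMMU in
 * sysfs and place it into an IOMMU group (PCI devices that cannot be
 * isolated from one another end up sharing a group).
 */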
static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(&iommu->iommu, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(&iommu->iommu, dev);
}

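/*
 * Report the IOVA ranges the device may not use for ordinary DMA: any
 * RMRR ranges the firmware declared for it, plus the
 * IOAPIC_RANGE_START..IOAPIC_RANGE_END interrupt (MSI) window.
 */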
static void intel_iommu_get_resv_regions(struct device *device,
					 struct list_head *head)
{
	struct iommu_resv_region *reg;
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			if (i_dev != device)
				continue;

			list_add_tail(&rmrr->resv->list, head);
		}
	}
	rcu_read_unlock();

	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_MSI);
	if (!reg)
		return;
	list_add_tail(&reg->list, head);
}

static void intel_iommu_put_resv_regions(struct device *dev,
					 struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list) {
		if (entry->type == IOMMU_RESV_RESERVED)
			kfree(entry);
	}
}

#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
{
	/*
	 * Convert ecap_pss to the extended context entry pts encoding, and
	 * also respect the soft pasid_max value set by the iommu.
	 * - number of PASID bits = ecap_pss + 1
	 * - number of PASID table entries = 2^(pts + 5)
	 * Therefore, pts = ecap_pss - 4
	 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
	 */
	if (ecap_pss(iommu->ecap) < 5)
		return 0;

	/* pasid_max is encoded as the actual number of entries, not bits */
	return find_first_bit((unsigned long *)&iommu->pasid_max,
			MAX_NR_PASID_BITS) - 5;
}

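/*
 * Worked example for the above (hypothetical values): with
 * ecap_pss = 0x13 the hardware supports 20 PASID bits, but if software
 * capped pasid_max at 0x8000 (2^15 entries), find_first_bit() yields 15
 * and pts = 10, i.e. a table of 2^(10 + 5) = 2^15 entries, matching
 * pasid_max.
 */
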
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		if (iommu->pasid_state_table)
			context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
			intel_iommu_get_pts(iommu);

		wmb();
		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set, which makes sense.  For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked, which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types depending on whether ATS
		 * is available or not.  Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

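/*
 * Helper for the SVM code: resolve @dev to its IOMMU and confirm that
 * the IOMMU actually has a PASID table, since Shared Virtual Memory is
 * unusable without one.
 */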
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	if (!iommu->pasid_table) {
		dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

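/*
 * The ops table handed to the generic IOMMU core; it is registered for
 * the PCI bus during driver initialization (via bus_set_iommu())
 * elsewhere in this file.
 */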
const struct iommu_ops intel_iommu_ops = {
	.capable		= intel_iommu_capable,
	.domain_alloc		= intel_iommu_domain_alloc,
	.domain_free		= intel_iommu_domain_free,
	.attach_dev		= intel_iommu_attach_device,
	.detach_dev		= intel_iommu_detach_device,
	.map			= intel_iommu_map,
	.unmap			= intel_iommu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= intel_iommu_iova_to_phys,
	.add_device		= intel_iommu_add_device,
	.remove_device		= intel_iommu_remove_device,
	.get_resv_regions	= intel_iommu_get_resv_regions,
	.put_resv_regions	= intel_iommu_put_resv_regions,
	.device_group		= pci_device_group,
	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

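/*
 * GGC is the graphics control register in the host bridge's PCI config
 * space; the field layout below matches the Ironlake/Calpella parts that
 * the quirk underneath applies to.
 */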
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}