/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

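/*
 * For example, with a 48-bit guest address width, __DOMAIN_MAX_PFN(48) is
 * (1ULL << 36) - 1. On 64-bit kernels that already fits in an unsigned long;
 * on 32-bit kernels the min_t() above clamps it to ULONG_MAX.
 */
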
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of 4KiB and
 * that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * every power-of-two page size from 4KiB upwards.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

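/*
 * agaw 0 corresponds to a 2-level table covering 30 bits of address, and each
 * additional agaw step adds one level and LEVEL_STRIDE (9) bits: agaw 1/2/3
 * give 39/48/57-bit widths with 3/4/5-level page tables respectively.
 */
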
static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

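/*
 * A level-1 entry maps a single 4KiB page, a level-2 entry 512 pages (2MiB)
 * and a level-3 entry 512 * 512 pages (1GiB); lvl_to_nr_pages() caps the
 * shift at MAX_AGAW_PFN_WIDTH so the result cannot overflow.
 */
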
/* VT-d pages must never be larger than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

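/*
 * On x86 PAGE_SHIFT and VTD_PAGE_SHIFT are both 12, so the conversions above
 * are identity operations; they only do real work if the kernel page size is
 * larger than the 4KiB VT-d page size.
 */
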
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

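/*
 * With 16-byte root entries and a 4KiB root table, ROOT_ENTRY_NR is 256:
 * one root entry per PCI bus number.
 */
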
/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

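/*
 * A context entry can thus be composed with the helpers above:
 * context_set_address_root() installs the page-table root in the low word,
 * context_set_translation_type() selects how requests are translated,
 * context_set_address_width() and context_set_domain_id() fill the high
 * word, and context_set_present() finally marks the entry valid.
 */
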
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

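/*
 * A page-table entry counts as present if either the read or the write bit
 * is set. first_pte_in_page() relies on page tables being page-sized and
 * page-aligned: only the first entry in a table has all of the low 12
 * address bits of its pointer clear.
 */
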
/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support — normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu)	(ecs_enabled(iommu) &&			\
				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			printk(KERN_INFO
				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
			intel_iommu_tboot_noforce = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

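/*
 * The options above are comma-separated on the kernel command line, e.g.
 * "intel_iommu=on,strict,igfx_off".
 */
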
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

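/*
 * Domain ids are 16 bits wide, so each IOMMU can have up to 65536 domains.
 * They are tracked in a two-level table: the top byte of the id indexes
 * iommu->domains[], and each second-level array of 256 pointers is only
 * allocated (in set_iommu_domain() above) once an id in that range is used.
 */
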
static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

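/*
 * DEFAULT_DOMAIN_ADDRESS_WIDTH (57) corresponds to agaw 3, so the loop in
 * __iommu_calculate_agaw() starts there and walks down until it finds a bit
 * set in the SAGAW capability field; an IOMMU that only supports 4-level
 * (48-bit) tables ends up with agaw 2, for example.
 */
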
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

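/*
 * In extended context (ECS) mode each context entry is twice as large, so a
 * single 4KiB table only covers devfns 0-127; the entries for devfn 0x80 and
 * above live in the table referenced by root->hi, which is why the devfn is
 * rebased and doubled above before indexing.
 */
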
static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = to_pci_dev(dev);

#ifdef CONFIG_X86
		/* VMD child devices currently cannot be handled individually */
		if (is_vmd(pdev->bus))
			return NULL;
#endif

		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev && pdev->is_virtfn)
					goto got_pdev;

				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

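/*
 * If any IOMMU attached to the domain cannot snoop the CPU caches during its
 * page-table walk (the coherency bit in ECAP is clear), CPU updates to the
 * page tables must be flushed out with clflush before the hardware is
 * guaranteed to see them.
 */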
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

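/*
 * In pfn_to_dma_pte() above, *target_level is an in/out parameter: 0 means
 * "walk the existing tables and return the lowest-level entry found, without
 * allocating anything", while a non-zero level makes the walk allocate any
 * missing intermediate tables down to that level. When 0 was passed in, it
 * is updated on return to the level of the entry that was actually reached.
 */
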
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; should be followed by a TLB flush */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       int retain_level, struct dma_pte *pte,
			       unsigned long pfn, unsigned long start_pfn,
			       unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2) {
			dma_pte_free_level(domain, level - 1, retain_level,
					   level_pte, level_pfn, start_pfn,
					   last_pfn);
		}

		/*
		 * Free the page table if we're below the level we want to
		 * retain and the range covers the entire table.
		 */
		if (level < retain_level && !(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/*
 * clear last level (leaf) ptes and free page table pages below the
 * level we wish to keep intact.
 */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   int retain_level)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

1249/* We can't just free the pages because the IOMMU may still be walking
1250 the page tables, and may have cached the intermediate levels. The
1251 pages can only be freed after the IOTLB flush has been done. */
Joerg Roedelb6904202015-08-13 11:32:18 +02001252static struct page *domain_unmap(struct dmar_domain *domain,
1253 unsigned long start_pfn,
1254 unsigned long last_pfn)
David Woodhouseea8ea462014-03-05 17:09:32 +00001255{
David Woodhouseea8ea462014-03-05 17:09:32 +00001256 struct page *freelist = NULL;
1257
Jiang Liu162d1b12014-07-11 14:19:35 +08001258 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1259 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001260 BUG_ON(start_pfn > last_pfn);
1261
1262 /* we don't need lock here; nobody else touches the iova range */
1263 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1264 domain->pgd, 0, start_pfn, last_pfn, NULL);
1265
1266 /* free pgd */
1267 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1268 struct page *pgd_page = virt_to_page(domain->pgd);
1269 pgd_page->freelist = freelist;
1270 freelist = pgd_page;
1271
1272 domain->pgd = NULL;
1273 }
1274
1275 return freelist;
1276}
1277
Joerg Roedelb6904202015-08-13 11:32:18 +02001278static void dma_free_pagelist(struct page *freelist)
David Woodhouseea8ea462014-03-05 17:09:32 +00001279{
1280 struct page *pg;
1281
1282 while ((pg = freelist)) {
1283 freelist = pg->freelist;
1284 free_pgtable_page(page_address(pg));
1285 }
1286}
1287
Joerg Roedel13cf0172017-08-11 11:40:10 +02001288static void iova_entry_free(unsigned long data)
1289{
1290 struct page *freelist = (struct page *)data;
1291
1292 dma_free_pagelist(freelist);
1293}
1294
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001295/* iommu handling */
1296static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1297{
1298 struct root_entry *root;
1299 unsigned long flags;
1300
Suresh Siddha4c923d42009-10-02 11:01:24 -07001301 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001302 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001303 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001304 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001306 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001307
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001308 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001309
1310 spin_lock_irqsave(&iommu->lock, flags);
1311 iommu->root_entry = root;
1312 spin_unlock_irqrestore(&iommu->lock, flags);
1313
1314 return 0;
1315}
1316
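/*
 * Program the root table address (plus the extended-context RTT bit when
 * ECS is in use) into the hardware, then issue the Set Root Table Pointer
 * command and wait for it to complete.
 */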
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001317static void iommu_set_root_entry(struct intel_iommu *iommu)
1318{
David Woodhouse03ecc322015-02-13 14:35:21 +00001319 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001320 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001321 unsigned long flag;
1322
David Woodhouse03ecc322015-02-13 14:35:21 +00001323 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001324 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001325 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001326
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001327 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001328 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001329
David Woodhousec416daa2009-05-10 20:30:58 +01001330 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001331
1332 /* Make sure hardware complete it */
1333 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001334 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001335
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001336 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001337}
1338
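/*
 * Flush the IOMMU's internal write buffer, but only on hardware that
 * actually requires it (cap_rwbf or the rwbf quirk).
 */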
1339static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1340{
1341 u32 val;
1342 unsigned long flag;
1343
David Woodhouse9af88142009-02-13 23:18:03 +00001344 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001345 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001346
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001347 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001348 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349
1350 /* Make sure hardware complete it */
1351 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001352 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001353
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001354 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001355}
1356
1357/* return value determines if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001358static void __iommu_flush_context(struct intel_iommu *iommu,
1359 u16 did, u16 source_id, u8 function_mask,
1360 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001361{
1362 u64 val = 0;
1363 unsigned long flag;
1364
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001365 switch (type) {
1366 case DMA_CCMD_GLOBAL_INVL:
1367 val = DMA_CCMD_GLOBAL_INVL;
1368 break;
1369 case DMA_CCMD_DOMAIN_INVL:
1370 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1371 break;
1372 case DMA_CCMD_DEVICE_INVL:
1373 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1374 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1375 break;
1376 default:
1377 BUG();
1378 }
1379 val |= DMA_CCMD_ICC;
1380
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001381 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001382 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1383
1384 /* Make sure hardware complete it */
1385 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1386 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1387
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001388 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001389}
1390
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001391/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001392static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1393 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001394{
1395 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1396 u64 val = 0, val_iva = 0;
1397 unsigned long flag;
1398
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001399 switch (type) {
1400 case DMA_TLB_GLOBAL_FLUSH:
1401		/* global flush doesn't need to set IVA_REG */
1402 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1403 break;
1404 case DMA_TLB_DSI_FLUSH:
1405 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1406 break;
1407 case DMA_TLB_PSI_FLUSH:
1408 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001409 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410 val_iva = size_order | addr;
1411 break;
1412 default:
1413 BUG();
1414 }
1415 /* Note: set drain read/write */
1416#if 0
1417 /*
1418	 * This is probably only here to be extra safe. It looks like we can
1419	 * ignore it without any impact.
1420 */
1421 if (cap_read_drain(iommu->cap))
1422 val |= DMA_TLB_READ_DRAIN;
1423#endif
1424 if (cap_write_drain(iommu->cap))
1425 val |= DMA_TLB_WRITE_DRAIN;
1426
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001427 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001428 /* Note: Only uses first TLB reg currently */
1429 if (val_iva)
1430 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1431 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1432
1433 /* Make sure hardware complete it */
1434 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1435 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1436
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001437 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001438
1439 /* check IOTLB invalidation granularity */
1440 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001441 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001443 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001444 (unsigned long long)DMA_TLB_IIRG(type),
1445 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001446}
1447
David Woodhouse64ae8922014-03-09 12:52:30 -07001448static struct device_domain_info *
1449iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1450 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001451{
Yu Zhao93a23a72009-05-18 13:51:37 +08001452 struct device_domain_info *info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001453
Joerg Roedel55d94042015-07-22 16:50:40 +02001454 assert_spin_locked(&device_domain_lock);
1455
Yu Zhao93a23a72009-05-18 13:51:37 +08001456 if (!iommu->qi)
1457 return NULL;
1458
Yu Zhao93a23a72009-05-18 13:51:37 +08001459 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001460 if (info->iommu == iommu && info->bus == bus &&
1461 info->devfn == devfn) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001462 if (info->ats_supported && info->dev)
1463 return info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001464 break;
1465 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001466
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001467 return NULL;
Yu Zhao93a23a72009-05-18 13:51:37 +08001468}
1469
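/*
 * Recompute domain->has_iotlb_device: true if any device attached to the
 * domain has ATS enabled and therefore needs device-IOTLB flushes.
 */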
Omer Peleg0824c592016-04-20 19:03:35 +03001470static void domain_update_iotlb(struct dmar_domain *domain)
1471{
1472 struct device_domain_info *info;
1473 bool has_iotlb_device = false;
1474
1475 assert_spin_locked(&device_domain_lock);
1476
1477 list_for_each_entry(info, &domain->devices, link) {
1478 struct pci_dev *pdev;
1479
1480 if (!info->dev || !dev_is_pci(info->dev))
1481 continue;
1482
1483 pdev = to_pci_dev(info->dev);
1484 if (pdev->ats_enabled) {
1485 has_iotlb_device = true;
1486 break;
1487 }
1488 }
1489
1490 domain->has_iotlb_device = has_iotlb_device;
1491}
1492
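/*
 * Enable ATS on the device (and PASID/PRI when SVM support is built in)
 * so that it may cache translations; the domain is updated so that later
 * flushes also invalidate the device IOTLB.
 */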
Yu Zhao93a23a72009-05-18 13:51:37 +08001493static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1494{
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001495 struct pci_dev *pdev;
1496
Omer Peleg0824c592016-04-20 19:03:35 +03001497 assert_spin_locked(&device_domain_lock);
1498
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001499 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001500 return;
1501
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001502 pdev = to_pci_dev(info->dev);
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001503
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001504#ifdef CONFIG_INTEL_IOMMU_SVM
1505	/* The PCIe spec, in its wisdom, declares that the behaviour of
1506	   the device is undefined if you enable PASID support after ATS
1507	   support. So always enable PASID support on devices which
1508 have it, even if we can't yet know if we're ever going to
1509 use it. */
1510 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1511 info->pasid_enabled = 1;
1512
1513 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1514 info->pri_enabled = 1;
1515#endif
1516 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1517 info->ats_enabled = 1;
Omer Peleg0824c592016-04-20 19:03:35 +03001518 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001519 info->ats_qdep = pci_ats_queue_depth(pdev);
1520 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001521}
1522
1523static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1524{
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001525 struct pci_dev *pdev;
1526
Omer Peleg0824c592016-04-20 19:03:35 +03001527 assert_spin_locked(&device_domain_lock);
1528
Jeremy McNicollda972fb2016-01-14 21:33:06 -08001529 if (!dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001530 return;
1531
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001532 pdev = to_pci_dev(info->dev);
1533
1534 if (info->ats_enabled) {
1535 pci_disable_ats(pdev);
1536 info->ats_enabled = 0;
Omer Peleg0824c592016-04-20 19:03:35 +03001537 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001538 }
1539#ifdef CONFIG_INTEL_IOMMU_SVM
1540 if (info->pri_enabled) {
1541 pci_disable_pri(pdev);
1542 info->pri_enabled = 0;
1543 }
1544 if (info->pasid_enabled) {
1545 pci_disable_pasid(pdev);
1546 info->pasid_enabled = 0;
1547 }
1548#endif
Yu Zhao93a23a72009-05-18 13:51:37 +08001549}
1550
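/*
 * Send a device-IOTLB invalidation for the given address range to every
 * ATS-enabled device attached to the domain.
 */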
1551static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1552 u64 addr, unsigned mask)
1553{
1554 u16 sid, qdep;
1555 unsigned long flags;
1556 struct device_domain_info *info;
1557
Omer Peleg0824c592016-04-20 19:03:35 +03001558 if (!domain->has_iotlb_device)
1559 return;
1560
Yu Zhao93a23a72009-05-18 13:51:37 +08001561 spin_lock_irqsave(&device_domain_lock, flags);
1562 list_for_each_entry(info, &domain->devices, link) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001563 if (!info->ats_enabled)
Yu Zhao93a23a72009-05-18 13:51:37 +08001564 continue;
1565
1566 sid = info->bus << 8 | info->devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001567 qdep = info->ats_qdep;
Yu Zhao93a23a72009-05-18 13:51:37 +08001568 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1569 }
1570 spin_unlock_irqrestore(&device_domain_lock, flags);
1571}
1572
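/*
 * Page-selective IOTLB invalidation for @pages pages starting at @pfn,
 * falling back to a domain-selective flush when PSI cannot cover the
 * request. Device IOTLBs are flushed too, except for caching-mode
 * updates of previously non-present entries.
 */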
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001573static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1574 struct dmar_domain *domain,
1575 unsigned long pfn, unsigned int pages,
1576 int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001577{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001578 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001579 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001580 u16 did = domain->iommu_did[iommu->seq_id];
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001581
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001582 BUG_ON(pages == 0);
1583
David Woodhouseea8ea462014-03-05 17:09:32 +00001584 if (ih)
1585 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001586 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001587	 * Fall back to domain selective flush if no PSI support or the size is
1588 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001589 * PSI requires page size to be 2 ^ x, and the base address is naturally
1590 * aligned to the size
1591 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001592 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1593 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001594 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001595 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001596 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001597 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001598
1599 /*
Nadav Amit82653632010-04-01 13:24:40 +03001600 * In caching mode, changes of pages from non-present to present require
1601 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001602 */
Nadav Amit82653632010-04-01 13:24:40 +03001603 if (!cap_caching_mode(iommu->cap) || !map)
Peter Xu9d2e6502018-01-10 13:51:37 +08001604 iommu_flush_dev_iotlb(domain, addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001605}
1606
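/*
 * Flush-queue callback for the IOVA allocator: perform a domain-selective
 * flush on every IOMMU the domain is attached to before the deferred IOVA
 * ranges are reused.
 */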
Joerg Roedel13cf0172017-08-11 11:40:10 +02001607static void iommu_flush_iova(struct iova_domain *iovad)
1608{
1609 struct dmar_domain *domain;
1610 int idx;
1611
1612 domain = container_of(iovad, struct dmar_domain, iovad);
1613
1614 for_each_domain_iommu(idx, domain) {
1615 struct intel_iommu *iommu = g_iommus[idx];
1616 u16 did = domain->iommu_did[iommu->seq_id];
1617
1618 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1619
1620 if (!cap_caching_mode(iommu->cap))
1621 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1622 0, MAX_AGAW_PFN_WIDTH);
1623 }
1624}
1625
mark grossf8bab732008-02-08 04:18:38 -08001626static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1627{
1628 u32 pmen;
1629 unsigned long flags;
1630
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001631 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001632 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1633 pmen &= ~DMA_PMEN_EPM;
1634 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1635
1636 /* wait for the protected region status bit to clear */
1637 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1638 readl, !(pmen & DMA_PMEN_PRS), pmen);
1639
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001640 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001641}
1642
Jiang Liu2a41cce2014-07-11 14:19:33 +08001643static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001644{
1645 u32 sts;
1646 unsigned long flags;
1647
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001648 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001649 iommu->gcmd |= DMA_GCMD_TE;
1650 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651
1652 /* Make sure hardware complete it */
1653 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001654 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001655
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001656 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001657}
1658
Jiang Liu2a41cce2014-07-11 14:19:33 +08001659static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001660{
1661 u32 sts;
1662 unsigned long flag;
1663
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001664 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001665 iommu->gcmd &= ~DMA_GCMD_TE;
1666 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1667
1668 /* Make sure hardware complete it */
1669 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001670 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001671
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001672 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001673}
1674
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001675
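/*
 * Allocate the domain-id bitmap and the two-level domain pointer array
 * for this IOMMU. Domain-id 0 is reserved and never handed out.
 */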
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001676static int iommu_init_domains(struct intel_iommu *iommu)
1677{
Joerg Roedel8bf47812015-07-21 10:41:21 +02001678 u32 ndomains, nlongs;
1679 size_t size;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001680
1681 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001682 pr_debug("%s: Number of Domains supported <%d>\n",
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001683 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001684 nlongs = BITS_TO_LONGS(ndomains);
1685
Donald Dutile94a91b502009-08-20 16:51:34 -04001686 spin_lock_init(&iommu->lock);
1687
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001688 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1689 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001690 pr_err("%s: Allocating domain id array failed\n",
1691 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001692 return -ENOMEM;
1693 }
Joerg Roedel8bf47812015-07-21 10:41:21 +02001694
Wei Yang86f004c2016-05-21 02:41:51 +00001695 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001696 iommu->domains = kzalloc(size, GFP_KERNEL);
1697
1698 if (iommu->domains) {
1699 size = 256 * sizeof(struct dmar_domain *);
1700 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1701 }
1702
1703 if (!iommu->domains || !iommu->domains[0]) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001704 pr_err("%s: Allocating domain array failed\n",
1705 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001706 kfree(iommu->domain_ids);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001707 kfree(iommu->domains);
Jiang Liu852bdb02014-01-06 14:18:11 +08001708 iommu->domain_ids = NULL;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001709 iommu->domains = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001710 return -ENOMEM;
1711 }
1712
Joerg Roedel8bf47812015-07-21 10:41:21 +02001713
1714
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001715 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001716 * If Caching mode is set, then invalid translations are tagged
1717 * with domain-id 0, hence we need to pre-allocate it. We also
1718 * use domain-id 0 as a marker for non-allocated domain-id, so
1719 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001720 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001721 set_bit(0, iommu->domain_ids);
1722
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001723 return 0;
1724}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725
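/*
 * Detach every device that is attached through this IOMMU and then turn
 * translation off. Regular (non-VM, non-identity) domains are destroyed
 * in the process.
 */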
Jiang Liuffebeb42014-11-09 22:48:02 +08001726static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001727{
Joerg Roedel29a27712015-07-21 17:17:12 +02001728 struct device_domain_info *info, *tmp;
Joerg Roedel55d94042015-07-22 16:50:40 +02001729 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001730
Joerg Roedel29a27712015-07-21 17:17:12 +02001731 if (!iommu->domains || !iommu->domain_ids)
1732 return;
Jiang Liua4eaa862014-02-19 14:07:30 +08001733
Joerg Roedelbea64032016-11-08 15:08:26 +01001734again:
Joerg Roedel55d94042015-07-22 16:50:40 +02001735 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001736 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1737 struct dmar_domain *domain;
1738
1739 if (info->iommu != iommu)
1740 continue;
1741
1742 if (!info->dev || !info->domain)
1743 continue;
1744
1745 domain = info->domain;
1746
Joerg Roedelbea64032016-11-08 15:08:26 +01001747 __dmar_remove_one_dev_info(info);
Joerg Roedel29a27712015-07-21 17:17:12 +02001748
Joerg Roedelbea64032016-11-08 15:08:26 +01001749 if (!domain_type_is_vm_or_si(domain)) {
1750 /*
1751 * The domain_exit() function can't be called under
1752 * device_domain_lock, as it takes this lock itself.
1753 * So release the lock here and re-run the loop
1754 * afterwards.
1755 */
1756 spin_unlock_irqrestore(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001757 domain_exit(domain);
Joerg Roedelbea64032016-11-08 15:08:26 +01001758 goto again;
1759 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001760 }
Joerg Roedel55d94042015-07-22 16:50:40 +02001761 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001762
1763 if (iommu->gcmd & DMA_GCMD_TE)
1764 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001765}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001766
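/*
 * Release the per-IOMMU domain arrays, domain-id bitmap and context
 * tables, plus the SVM PASID/page-request state when that is enabled.
 */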
Jiang Liuffebeb42014-11-09 22:48:02 +08001767static void free_dmar_iommu(struct intel_iommu *iommu)
1768{
1769 if ((iommu->domains) && (iommu->domain_ids)) {
Wei Yang86f004c2016-05-21 02:41:51 +00001770 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001771 int i;
1772
1773 for (i = 0; i < elems; i++)
1774 kfree(iommu->domains[i]);
Jiang Liuffebeb42014-11-09 22:48:02 +08001775 kfree(iommu->domains);
1776 kfree(iommu->domain_ids);
1777 iommu->domains = NULL;
1778 iommu->domain_ids = NULL;
1779 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001780
Weidong Hand9630fe2008-12-08 11:06:32 +08001781 g_iommus[iommu->seq_id] = NULL;
1782
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001783 /* free context mapping */
1784 free_context_table(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001785
1786#ifdef CONFIG_INTEL_IOMMU_SVM
David Woodhousea222a7f2015-10-07 23:35:18 +01001787 if (pasid_enabled(iommu)) {
1788 if (ecap_prs(iommu->ecap))
1789 intel_svm_finish_prq(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001790 intel_svm_free_pasid_tables(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01001791 }
David Woodhouse8a94ade2015-03-24 14:54:56 +00001792#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001793}
1794
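/*
 * Allocate and zero-initialise a dmar_domain. IOMMU attachment, the IOVA
 * allocator and the page tables are set up later by the callers.
 */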
Jiang Liuab8dfe22014-07-11 14:19:27 +08001795static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001796{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001797 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001798
1799 domain = alloc_domain_mem();
1800 if (!domain)
1801 return NULL;
1802
Jiang Liuab8dfe22014-07-11 14:19:27 +08001803 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001804 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001805 domain->flags = flags;
Omer Peleg0824c592016-04-20 19:03:35 +03001806 domain->has_iotlb_device = false;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001807 INIT_LIST_HEAD(&domain->devices);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001808
1809 return domain;
1810}
1811
Joerg Roedeld160aca2015-07-22 11:52:53 +02001812/* Must be called with iommu->lock */
1813static int domain_attach_iommu(struct dmar_domain *domain,
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001814 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001815{
Jiang Liu44bde612014-07-11 14:19:29 +08001816 unsigned long ndomains;
Joerg Roedel55d94042015-07-22 16:50:40 +02001817 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001818
Joerg Roedel55d94042015-07-22 16:50:40 +02001819 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001820 assert_spin_locked(&iommu->lock);
Jiang Liu44bde612014-07-11 14:19:29 +08001821
Joerg Roedel29a27712015-07-21 17:17:12 +02001822 domain->iommu_refcnt[iommu->seq_id] += 1;
1823 domain->iommu_count += 1;
1824 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
Jiang Liufb170fb2014-07-11 14:19:28 +08001825 ndomains = cap_ndoms(iommu->cap);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001826 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1827
1828 if (num >= ndomains) {
1829 pr_err("%s: No free domain ids\n", iommu->name);
1830 domain->iommu_refcnt[iommu->seq_id] -= 1;
1831 domain->iommu_count -= 1;
Joerg Roedel55d94042015-07-22 16:50:40 +02001832 return -ENOSPC;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001833 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001834
Joerg Roedeld160aca2015-07-22 11:52:53 +02001835 set_bit(num, iommu->domain_ids);
1836 set_iommu_domain(iommu, num, domain);
Jiang Liufb170fb2014-07-11 14:19:28 +08001837
Joerg Roedeld160aca2015-07-22 11:52:53 +02001838 domain->iommu_did[iommu->seq_id] = num;
1839 domain->nid = iommu->node;
1840
Jiang Liufb170fb2014-07-11 14:19:28 +08001841 domain_update_iommu_cap(domain);
1842 }
Joerg Roedeld160aca2015-07-22 11:52:53 +02001843
Joerg Roedel55d94042015-07-22 16:50:40 +02001844 return 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001845}
1846
1847static int domain_detach_iommu(struct dmar_domain *domain,
1848 struct intel_iommu *iommu)
1849{
Joerg Roedeld160aca2015-07-22 11:52:53 +02001850 int num, count = INT_MAX;
Jiang Liufb170fb2014-07-11 14:19:28 +08001851
Joerg Roedel55d94042015-07-22 16:50:40 +02001852 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001853 assert_spin_locked(&iommu->lock);
Jiang Liufb170fb2014-07-11 14:19:28 +08001854
Joerg Roedel29a27712015-07-21 17:17:12 +02001855 domain->iommu_refcnt[iommu->seq_id] -= 1;
1856 count = --domain->iommu_count;
1857 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02001858 num = domain->iommu_did[iommu->seq_id];
1859 clear_bit(num, iommu->domain_ids);
1860 set_iommu_domain(iommu, num, NULL);
1861
Jiang Liufb170fb2014-07-11 14:19:28 +08001862 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001863 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001864 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001865
1866 return count;
1867}
1868
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001869static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001870static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001871
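/*
 * Reserve IOVA ranges that must never be handed out for DMA: the IOAPIC
 * window and every PCI MMIO resource (to avoid peer-to-peer accesses).
 */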
Joseph Cihula51a63e62011-03-21 11:04:24 -07001872static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001873{
1874 struct pci_dev *pdev = NULL;
1875 struct iova *iova;
1876 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001877
Zhen Leiaa3ac942017-09-21 16:52:45 +01001878 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001879
Mark Gross8a443df2008-03-04 14:59:31 -08001880 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1881 &reserved_rbtree_key);
1882
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001883 /* IOAPIC ranges shouldn't be accessed by DMA */
1884 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1885 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001886 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001887 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001888 return -ENODEV;
1889 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001890
1891 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1892 for_each_pci_dev(pdev) {
1893 struct resource *r;
1894
1895 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1896 r = &pdev->resource[i];
1897 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1898 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001899 iova = reserve_iova(&reserved_iova_list,
1900 IOVA_PFN(r->start),
1901 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001902 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001903 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001904 return -ENODEV;
1905 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906 }
1907 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001908 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001909}
1910
1911static void domain_reserve_special_ranges(struct dmar_domain *domain)
1912{
1913 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1914}
1915
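/*
 * Round the guest address width up to the next width the page-table
 * levels can express (12 plus a multiple of 9 bits), capped at 64. For
 * example a 48-bit guest width stays 48, while 40 bits rounds up to 48.
 */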
1916static inline int guestwidth_to_adjustwidth(int gaw)
1917{
1918 int agaw;
1919 int r = (gaw - 12) % 9;
1920
1921 if (r == 0)
1922 agaw = gaw;
1923 else
1924 agaw = gaw + 9 - r;
1925 if (agaw > 64)
1926 agaw = 64;
1927 return agaw;
1928}
1929
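/*
 * Initialise a newly allocated domain for use with @iommu: set up the
 * IOVA allocator and its flush queue, pick an address width the hardware
 * supports, record coherency/snooping/superpage capabilities and allocate
 * the top-level page directory.
 */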
Joerg Roedeldc534b22015-07-22 12:44:02 +02001930static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1931 int guest_width)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001932{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933 int adjust_width, agaw;
1934 unsigned long sagaw;
Joerg Roedel13cf0172017-08-11 11:40:10 +02001935 int err;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001936
Zhen Leiaa3ac942017-09-21 16:52:45 +01001937 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
Joerg Roedel13cf0172017-08-11 11:40:10 +02001938
1939 err = init_iova_flush_queue(&domain->iovad,
1940 iommu_flush_iova, iova_entry_free);
1941 if (err)
1942 return err;
1943
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001944 domain_reserve_special_ranges(domain);
1945
1946 /* calculate AGAW */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001947 if (guest_width > cap_mgaw(iommu->cap))
1948 guest_width = cap_mgaw(iommu->cap);
1949 domain->gaw = guest_width;
1950 adjust_width = guestwidth_to_adjustwidth(guest_width);
1951 agaw = width_to_agaw(adjust_width);
1952 sagaw = cap_sagaw(iommu->cap);
1953 if (!test_bit(agaw, &sagaw)) {
1954 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001955 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001956 agaw = find_next_bit(&sagaw, 5, agaw);
1957 if (agaw >= 5)
1958 return -ENODEV;
1959 }
1960 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001961
Weidong Han8e6040972008-12-08 15:49:06 +08001962 if (ecap_coherent(iommu->ecap))
1963 domain->iommu_coherency = 1;
1964 else
1965 domain->iommu_coherency = 0;
1966
Sheng Yang58c610b2009-03-18 15:33:05 +08001967 if (ecap_sc_support(iommu->ecap))
1968 domain->iommu_snooping = 1;
1969 else
1970 domain->iommu_snooping = 0;
1971
David Woodhouse214e39a2014-03-19 10:38:49 +00001972 if (intel_iommu_superpage)
1973 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1974 else
1975 domain->iommu_superpage = 0;
1976
Suresh Siddha4c923d42009-10-02 11:01:24 -07001977 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001978
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001979 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001980 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001981 if (!domain->pgd)
1982 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001983 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001984 return 0;
1985}
1986
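/*
 * Tear down a domain: detach any remaining devices, release the IOVA
 * allocator, and free the whole page-table hierarchy.
 */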
1987static void domain_exit(struct dmar_domain *domain)
1988{
David Woodhouseea8ea462014-03-05 17:09:32 +00001989 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001990
1991 /* Domain 0 is reserved, so dont process it */
1992 if (!domain)
1993 return;
1994
Joerg Roedeld160aca2015-07-22 11:52:53 +02001995 /* Remove associated devices and clear attached or cached domains */
1996 rcu_read_lock();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001997 domain_remove_dev_info(domain);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001998 rcu_read_unlock();
Jiang Liu92d03cc2014-02-19 14:07:28 +08001999
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002000 /* destroy iovas */
2001 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002002
David Woodhouseea8ea462014-03-05 17:09:32 +00002003 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004
David Woodhouseea8ea462014-03-05 17:09:32 +00002005 dma_free_pagelist(freelist);
2006
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002007 free_domain_mem(domain);
2008}
2009
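/*
 * Install the context entry for (@bus, @devfn) on @iommu so that it
 * points at the domain's page tables (or pass-through), then flush the
 * context and IOTLB caches as required by caching mode.
 */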
David Woodhouse64ae8922014-03-09 12:52:30 -07002010static int domain_context_mapping_one(struct dmar_domain *domain,
2011 struct intel_iommu *iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002012 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002013{
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002014 u16 did = domain->iommu_did[iommu->seq_id];
Joerg Roedel28ccce02015-07-21 14:45:31 +02002015 int translation = CONTEXT_TT_MULTI_LEVEL;
2016 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002017 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002018 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08002019 struct dma_pte *pgd;
Joerg Roedel55d94042015-07-22 16:50:40 +02002020 int ret, agaw;
Joerg Roedel28ccce02015-07-21 14:45:31 +02002021
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002022 WARN_ON(did == 0);
2023
Joerg Roedel28ccce02015-07-21 14:45:31 +02002024 if (hw_pass_through && domain_type_is_si(domain))
2025 translation = CONTEXT_TT_PASS_THROUGH;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002026
2027 pr_debug("Set context mapping for %02x:%02x.%d\n",
2028 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002029
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002030 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08002031
Joerg Roedel55d94042015-07-22 16:50:40 +02002032 spin_lock_irqsave(&device_domain_lock, flags);
2033 spin_lock(&iommu->lock);
2034
2035 ret = -ENOMEM;
David Woodhouse03ecc322015-02-13 14:35:21 +00002036 context = iommu_context_addr(iommu, bus, devfn, 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002037 if (!context)
Joerg Roedel55d94042015-07-22 16:50:40 +02002038 goto out_unlock;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002039
Joerg Roedel55d94042015-07-22 16:50:40 +02002040 ret = 0;
2041 if (context_present(context))
2042 goto out_unlock;
Joerg Roedelcf484d02015-06-12 12:21:46 +02002043
Xunlei Pangaec0e862016-12-05 20:09:07 +08002044 /*
2045 * For kdump cases, old valid entries may be cached due to the
2046 * in-flight DMA and copied pgtable, but there is no unmapping
2047 * behaviour for them, thus we need an explicit cache flush for
2048 * the newly-mapped device. For kdump, at this point, the device
2049 * is supposed to finish reset at its driver probe stage, so no
2050	 * in-flight DMA will exist, and we don't need to worry about it
2051	 * hereafter.
2052 */
2053 if (context_copied(context)) {
2054 u16 did_old = context_domain_id(context);
2055
Christos Gkekasb117e032017-10-08 23:33:31 +01002056 if (did_old < cap_ndoms(iommu->cap)) {
Xunlei Pangaec0e862016-12-05 20:09:07 +08002057 iommu->flush.flush_context(iommu, did_old,
2058 (((u16)bus) << 8) | devfn,
2059 DMA_CCMD_MASK_NOBIT,
2060 DMA_CCMD_DEVICE_INVL);
KarimAllah Ahmedf73a7ee2017-05-05 11:39:59 -07002061 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2062 DMA_TLB_DSI_FLUSH);
2063 }
Xunlei Pangaec0e862016-12-05 20:09:07 +08002064 }
2065
Weidong Hanea6606b2008-12-08 23:08:15 +08002066 pgd = domain->pgd;
2067
Joerg Roedelde24e552015-07-21 14:53:04 +02002068 context_clear_entry(context);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002069 context_set_domain_id(context, did);
Weidong Hanea6606b2008-12-08 23:08:15 +08002070
Joerg Roedelde24e552015-07-21 14:53:04 +02002071 /*
2072 * Skip top levels of page tables for iommu which has less agaw
2073 * than default. Unnecessary for PT mode.
2074 */
Yu Zhao93a23a72009-05-18 13:51:37 +08002075 if (translation != CONTEXT_TT_PASS_THROUGH) {
Joerg Roedelde24e552015-07-21 14:53:04 +02002076 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
Joerg Roedel55d94042015-07-22 16:50:40 +02002077 ret = -ENOMEM;
Joerg Roedelde24e552015-07-21 14:53:04 +02002078 pgd = phys_to_virt(dma_pte_addr(pgd));
Joerg Roedel55d94042015-07-22 16:50:40 +02002079 if (!dma_pte_present(pgd))
2080 goto out_unlock;
Joerg Roedelde24e552015-07-21 14:53:04 +02002081 }
2082
David Woodhouse64ae8922014-03-09 12:52:30 -07002083 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002084 if (info && info->ats_supported)
2085 translation = CONTEXT_TT_DEV_IOTLB;
2086 else
2087 translation = CONTEXT_TT_MULTI_LEVEL;
Joerg Roedelde24e552015-07-21 14:53:04 +02002088
Yu Zhao93a23a72009-05-18 13:51:37 +08002089 context_set_address_root(context, virt_to_phys(pgd));
2090 context_set_address_width(context, iommu->agaw);
Joerg Roedelde24e552015-07-21 14:53:04 +02002091 } else {
2092 /*
2093 * In pass through mode, AW must be programmed to
2094 * indicate the largest AGAW value supported by
2095 * hardware. And ASR is ignored by hardware.
2096 */
2097 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08002098 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002099
2100 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00002101 context_set_fault_enable(context);
2102 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08002103 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002104
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002105 /*
2106 * It's a non-present to present mapping. If hardware doesn't cache
2107	 * non-present entries we only need to flush the write-buffer. If it
2108	 * _does_ cache non-present entries, then it does so in the special
2109 * domain #0, which we have to flush:
2110 */
2111 if (cap_caching_mode(iommu->cap)) {
2112 iommu->flush.flush_context(iommu, 0,
2113 (((u16)bus) << 8) | devfn,
2114 DMA_CCMD_MASK_NOBIT,
2115 DMA_CCMD_DEVICE_INVL);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002116 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002117 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002118 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002119 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002120 iommu_enable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08002121
Joerg Roedel55d94042015-07-22 16:50:40 +02002122 ret = 0;
2123
2124out_unlock:
2125 spin_unlock(&iommu->lock);
2126 spin_unlock_irqrestore(&device_domain_lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08002127
Wei Yang5c365d12016-07-13 13:53:21 +00002128 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002129}
2130
Alex Williamson579305f2014-07-03 09:51:43 -06002131struct domain_context_mapping_data {
2132 struct dmar_domain *domain;
2133 struct intel_iommu *iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002134};
2135
2136static int domain_context_mapping_cb(struct pci_dev *pdev,
2137 u16 alias, void *opaque)
2138{
2139 struct domain_context_mapping_data *data = opaque;
2140
2141 return domain_context_mapping_one(data->domain, data->iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002142 PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06002143}
2144
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002145static int
Joerg Roedel28ccce02015-07-21 14:45:31 +02002146domain_context_mapping(struct dmar_domain *domain, struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002147{
David Woodhouse64ae8922014-03-09 12:52:30 -07002148 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002149 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06002150 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002151
David Woodhousee1f167f2014-03-09 15:24:46 -07002152 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002153 if (!iommu)
2154 return -ENODEV;
2155
Alex Williamson579305f2014-07-03 09:51:43 -06002156 if (!dev_is_pci(dev))
Joerg Roedel28ccce02015-07-21 14:45:31 +02002157 return domain_context_mapping_one(domain, iommu, bus, devfn);
Alex Williamson579305f2014-07-03 09:51:43 -06002158
2159 data.domain = domain;
2160 data.iommu = iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002161
2162 return pci_for_each_dma_alias(to_pci_dev(dev),
2163 &domain_context_mapping_cb, &data);
2164}
2165
2166static int domain_context_mapped_cb(struct pci_dev *pdev,
2167 u16 alias, void *opaque)
2168{
2169 struct intel_iommu *iommu = opaque;
2170
2171 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002172}
2173
David Woodhousee1f167f2014-03-09 15:24:46 -07002174static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002175{
Weidong Han5331fe62008-12-08 23:00:00 +08002176 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002177 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002178
David Woodhousee1f167f2014-03-09 15:24:46 -07002179 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002180 if (!iommu)
2181 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182
Alex Williamson579305f2014-07-03 09:51:43 -06002183 if (!dev_is_pci(dev))
2184 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002185
Alex Williamson579305f2014-07-03 09:51:43 -06002186 return !pci_for_each_dma_alias(to_pci_dev(dev),
2187 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002188}
2189
Fenghua Yuf5329592009-08-04 15:09:37 -07002190/* Returns a number of VTD pages, but aligned to MM page size */
2191static inline unsigned long aligned_nrpages(unsigned long host_addr,
2192 size_t size)
2193{
2194 host_addr &= ~PAGE_MASK;
2195 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2196}
2197
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002198/* Return largest possible superpage level for a given mapping */
2199static inline int hardware_largepage_caps(struct dmar_domain *domain,
2200 unsigned long iov_pfn,
2201 unsigned long phy_pfn,
2202 unsigned long pages)
2203{
2204 int support, level = 1;
2205 unsigned long pfnmerge;
2206
2207 support = domain->iommu_superpage;
2208
2209 /* To use a large page, the virtual *and* physical addresses
2210 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2211 of them will mean we have to use smaller pages. So just
2212 merge them and check both at once. */
2213 pfnmerge = iov_pfn | phy_pfn;
2214
2215 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2216 pages >>= VTD_STRIDE_SHIFT;
2217 if (!pages)
2218 break;
2219 pfnmerge >>= VTD_STRIDE_SHIFT;
2220 level++;
2221 support--;
2222 }
2223 return level;
2224}
2225
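/*
 * Core mapping routine: map @nr_pages pages at @iov_pfn either from a
 * scatterlist or from a contiguous @phys_pfn range, using the largest
 * superpage size the hardware and alignment allow, and flush the CPU
 * cache for each page-table page that is written.
 */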
David Woodhouse9051aa02009-06-29 12:30:54 +01002226static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2227 struct scatterlist *sg, unsigned long phys_pfn,
2228 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002229{
2230 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002231 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002232 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002233 unsigned int largepage_lvl = 0;
2234 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002235
Jiang Liu162d1b12014-07-11 14:19:35 +08002236 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002237
2238 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2239 return -EINVAL;
2240
2241 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2242
Jiang Liucc4f14a2014-11-26 09:42:10 +08002243 if (!sg) {
2244 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002245 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2246 }
2247
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002248 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002249 uint64_t tmp;
2250
David Woodhousee1605492009-06-29 11:17:38 +01002251 if (!sg_res) {
Robin Murphy29a90b72017-09-28 15:14:01 +01002252 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2253
Fenghua Yuf5329592009-08-04 15:09:37 -07002254 sg_res = aligned_nrpages(sg->offset, sg->length);
Robin Murphy29a90b72017-09-28 15:14:01 +01002255 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
David Woodhousee1605492009-06-29 11:17:38 +01002256 sg->dma_length = sg->length;
Robin Murphy29a90b72017-09-28 15:14:01 +01002257 pteval = (sg_phys(sg) - pgoff) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002258 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002259 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002260
David Woodhousee1605492009-06-29 11:17:38 +01002261 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002262 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2263
David Woodhouse5cf0a762014-03-19 16:07:49 +00002264 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002265 if (!pte)
2266 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002267			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002268 if (largepage_lvl > 1) {
Christian Zanderba2374f2015-06-10 09:41:45 -07002269 unsigned long nr_superpages, end_pfn;
2270
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002271 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002272 lvl_pages = lvl_to_nr_pages(largepage_lvl);
Christian Zanderba2374f2015-06-10 09:41:45 -07002273
2274 nr_superpages = sg_res / lvl_pages;
2275 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2276
Jiang Liud41a4ad2014-07-11 14:19:34 +08002277 /*
2278 * Ensure that old small page tables are
Christian Zanderba2374f2015-06-10 09:41:45 -07002279 * removed to make room for superpage(s).
David Dillowbc24c572017-06-28 19:42:23 -07002280 * We're adding new large pages, so make sure
2281 * we don't remove their parent tables.
Jiang Liud41a4ad2014-07-11 14:19:34 +08002282 */
David Dillowbc24c572017-06-28 19:42:23 -07002283 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2284 largepage_lvl + 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002285 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002286 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002287 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002288
David Woodhousee1605492009-06-29 11:17:38 +01002289 }
2290	/* We don't need the lock here, nobody else
2291	 * touches the iova range
2292 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002293 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002294 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002295 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002296 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2297 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002298 if (dumps) {
2299 dumps--;
2300 debug_dma_dump_mappings(NULL);
2301 }
2302 WARN_ON(1);
2303 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002304
2305 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2306
2307 BUG_ON(nr_pages < lvl_pages);
2308 BUG_ON(sg_res < lvl_pages);
2309
2310 nr_pages -= lvl_pages;
2311 iov_pfn += lvl_pages;
2312 phys_pfn += lvl_pages;
2313 pteval += lvl_pages * VTD_PAGE_SIZE;
2314 sg_res -= lvl_pages;
2315
2316 /* If the next PTE would be the first in a new page, then we
2317 need to flush the cache on the entries we've just written.
2318 And then we'll need to recalculate 'pte', so clear it and
2319 let it get set again in the if (!pte) block above.
2320
2321 If we're done (!nr_pages) we need to flush the cache too.
2322
2323 Also if we've been setting superpages, we may need to
2324 recalculate 'pte' and switch back to smaller pages for the
2325 end of the mapping, if the trailing size is not enough to
2326 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002327 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002328 if (!nr_pages || first_pte_in_page(pte) ||
2329 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002330 domain_flush_cache(domain, first_pte,
2331 (void *)pte - (void *)first_pte);
2332 pte = NULL;
2333 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002334
2335 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002336 sg = sg_next(sg);
2337 }
2338 return 0;
2339}
2340
David Woodhouse9051aa02009-06-29 12:30:54 +01002341static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2342 struct scatterlist *sg, unsigned long nr_pages,
2343 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002344{
David Woodhouse9051aa02009-06-29 12:30:54 +01002345 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2346}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002347
David Woodhouse9051aa02009-06-29 12:30:54 +01002348static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2349 unsigned long phys_pfn, unsigned long nr_pages,
2350 int prot)
2351{
2352 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002353}
2354
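/*
 * Clear the context entry for (@bus, @devfn) and invalidate the context
 * and IOTLB caches for the domain-id it used to carry.
 */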
Joerg Roedel2452d9d2015-07-23 16:20:14 +02002355static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002356{
Filippo Sironi50822192017-08-31 10:58:11 +02002357 unsigned long flags;
2358 struct context_entry *context;
2359 u16 did_old;
2360
Weidong Hanc7151a82008-12-08 22:51:37 +08002361 if (!iommu)
2362 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002363
Filippo Sironi50822192017-08-31 10:58:11 +02002364 spin_lock_irqsave(&iommu->lock, flags);
2365 context = iommu_context_addr(iommu, bus, devfn, 0);
2366 if (!context) {
2367 spin_unlock_irqrestore(&iommu->lock, flags);
2368 return;
2369 }
2370 did_old = context_domain_id(context);
2371 context_clear_entry(context);
2372 __iommu_flush_cache(iommu, context, sizeof(*context));
2373 spin_unlock_irqrestore(&iommu->lock, flags);
2374 iommu->flush.flush_context(iommu,
2375 did_old,
2376 (((u16)bus) << 8) | devfn,
2377 DMA_CCMD_MASK_NOBIT,
2378 DMA_CCMD_DEVICE_INVL);
2379 iommu->flush.flush_iotlb(iommu,
2380 did_old,
2381 0,
2382 0,
2383 DMA_TLB_DSI_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002384}
2385
David Woodhouse109b9b02012-05-25 17:43:02 +01002386static inline void unlink_domain_info(struct device_domain_info *info)
2387{
2388 assert_spin_locked(&device_domain_lock);
2389 list_del(&info->link);
2390 list_del(&info->global);
2391 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002392 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002393}
2394
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002395static void domain_remove_dev_info(struct dmar_domain *domain)
2396{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002397 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002398 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399
2400 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel76f45fe2015-07-21 18:25:11 +02002401 list_for_each_entry_safe(info, tmp, &domain->devices, link)
Joerg Roedel127c7612015-07-23 17:44:46 +02002402 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002403 spin_unlock_irqrestore(&device_domain_lock, flags);
2404}
2405
2406/*
2407 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002408 * Note: we use struct device->archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002409 */
David Woodhouse1525a292014-03-06 16:19:30 +00002410static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002411{
2412 struct device_domain_info *info;
2413
2414 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002415 info = dev->archdata.iommu;
Peter Xub316d022017-05-22 18:28:51 +08002416 if (likely(info))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002417 return info->domain;
2418 return NULL;
2419}
2420
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002421static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002422dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2423{
2424 struct device_domain_info *info;
2425
2426 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002427 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002428 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002429 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002430
2431 return NULL;
2432}
2433
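/*
 * Allocate a device_domain_info for (@bus, @devfn), attach @domain to
 * @iommu, link the device into the domain and set up its context mapping.
 * Returns the domain actually in use, which may be an existing one if the
 * device (or an alias of it) was already attached.
 */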
Joerg Roedel5db31562015-07-22 12:40:43 +02002434static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2435 int bus, int devfn,
2436 struct device *dev,
2437 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002438{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002439 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002440 struct device_domain_info *info;
2441 unsigned long flags;
Joerg Roedeld160aca2015-07-22 11:52:53 +02002442 int ret;
Jiang Liu745f2582014-02-19 14:07:26 +08002443
2444 info = alloc_devinfo_mem();
2445 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002446 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002447
Jiang Liu745f2582014-02-19 14:07:26 +08002448 info->bus = bus;
2449 info->devfn = devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002450 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2451 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2452 info->ats_qdep = 0;
Jiang Liu745f2582014-02-19 14:07:26 +08002453 info->dev = dev;
2454 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002455 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002456
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002457 if (dev && dev_is_pci(dev)) {
2458 struct pci_dev *pdev = to_pci_dev(info->dev);
2459
2460 if (ecap_dev_iotlb_support(iommu->ecap) &&
2461 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2462 dmar_find_matched_atsr_unit(pdev))
2463 info->ats_supported = 1;
2464
2465 if (ecs_enabled(iommu)) {
2466 if (pasid_enabled(iommu)) {
2467 int features = pci_pasid_features(pdev);
2468 if (features >= 0)
2469 info->pasid_supported = features | 1;
2470 }
2471
2472 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2473 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2474 info->pri_supported = 1;
2475 }
2476 }
2477
Jiang Liu745f2582014-02-19 14:07:26 +08002478 spin_lock_irqsave(&device_domain_lock, flags);
2479 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002480 found = find_domain(dev);
Joerg Roedelf303e502015-07-23 18:37:13 +02002481
2482 if (!found) {
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002483 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002484 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
Joerg Roedelf303e502015-07-23 18:37:13 +02002485 if (info2) {
2486 found = info2->domain;
2487 info2->dev = dev;
2488 }
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002489 }
Joerg Roedelf303e502015-07-23 18:37:13 +02002490
Jiang Liu745f2582014-02-19 14:07:26 +08002491 if (found) {
2492 spin_unlock_irqrestore(&device_domain_lock, flags);
2493 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002494 /* Caller must free the original domain */
2495 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002496 }
2497
Joerg Roedeld160aca2015-07-22 11:52:53 +02002498 spin_lock(&iommu->lock);
2499 ret = domain_attach_iommu(domain, iommu);
2500 spin_unlock(&iommu->lock);
2501
2502 if (ret) {
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002503 spin_unlock_irqrestore(&device_domain_lock, flags);
Sudip Mukherjee499f3aa2015-09-18 16:27:07 +05302504 free_devinfo_mem(info);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002505 return NULL;
2506 }
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002507
David Woodhouseb718cd32014-03-09 13:11:33 -07002508 list_add(&info->link, &domain->devices);
2509 list_add(&info->global, &device_domain_list);
2510 if (dev)
2511 dev->archdata.iommu = info;
2512 spin_unlock_irqrestore(&device_domain_lock, flags);
2513
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002514 if (dev && domain_context_mapping(domain, dev)) {
2515 pr_err("Domain context map for %s failed\n", dev_name(dev));
Joerg Roedele6de0f82015-07-22 16:30:36 +02002516 dmar_remove_one_dev_info(domain, dev);
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002517 return NULL;
2518 }
2519
David Woodhouseb718cd32014-03-09 13:11:33 -07002520 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002521}
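/*
 * Illustrative note, not part of the driver: pci_pasid_features()
 * returns the PASID capability feature bits (or a negative errno), so
 * "info->pasid_supported = features | 1" above uses bit 0 purely as a
 * "PASID supported" flag while preserving the real feature bits.  For
 * example, assuming a device that reports only Privileged Mode:
 *
 *	features = 0x4;
 *	info->pasid_supported = 0x4 | 1;	// == 0x5, bit 0 = supported
 *
 * The exact feature-bit values here are an assumption for illustration;
 * the authoritative definition is the PCIe PASID capability.
 */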
2522
Alex Williamson579305f2014-07-03 09:51:43 -06002523static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2524{
2525 *(u16 *)opaque = alias;
2526 return 0;
2527}
2528
Joerg Roedel76208352016-08-25 14:25:12 +02002529static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002530{
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002531 struct device_domain_info *info = NULL;
Joerg Roedel76208352016-08-25 14:25:12 +02002532 struct dmar_domain *domain = NULL;
Alex Williamson579305f2014-07-03 09:51:43 -06002533 struct intel_iommu *iommu;
Joerg Roedel08a7f452015-07-23 18:09:11 +02002534 u16 req_id, dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002535 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002536 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002537
David Woodhouse146922e2014-03-09 15:44:17 -07002538 iommu = device_to_iommu(dev, &bus, &devfn);
2539 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002540 return NULL;
2541
Joerg Roedel08a7f452015-07-23 18:09:11 +02002542 req_id = ((u16)bus << 8) | devfn;
2543
Alex Williamson579305f2014-07-03 09:51:43 -06002544 if (dev_is_pci(dev)) {
2545 struct pci_dev *pdev = to_pci_dev(dev);
2546
2547 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2548
2549 spin_lock_irqsave(&device_domain_lock, flags);
2550 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2551 PCI_BUS_NUM(dma_alias),
2552 dma_alias & 0xff);
2553 if (info) {
2554 iommu = info->iommu;
2555 domain = info->domain;
2556 }
2557 spin_unlock_irqrestore(&device_domain_lock, flags);
2558
Joerg Roedel76208352016-08-25 14:25:12 +02002559 /* DMA alias already has a domain, use it */
Alex Williamson579305f2014-07-03 09:51:43 -06002560 if (info)
Joerg Roedel76208352016-08-25 14:25:12 +02002561 goto out;
Alex Williamson579305f2014-07-03 09:51:43 -06002562 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002563
David Woodhouse146922e2014-03-09 15:44:17 -07002564 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002565 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002566 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002567 return NULL;
Joerg Roedeldc534b22015-07-22 12:44:02 +02002568 if (domain_init(domain, iommu, gaw)) {
Alex Williamson579305f2014-07-03 09:51:43 -06002569 domain_exit(domain);
2570 return NULL;
2571 }
2572
Joerg Roedel76208352016-08-25 14:25:12 +02002573out:
Alex Williamson579305f2014-07-03 09:51:43 -06002574
Joerg Roedel76208352016-08-25 14:25:12 +02002575 return domain;
2576}
2577
2578static struct dmar_domain *set_domain_for_dev(struct device *dev,
2579 struct dmar_domain *domain)
2580{
2581 struct intel_iommu *iommu;
2582 struct dmar_domain *tmp;
2583 u16 req_id, dma_alias;
2584 u8 bus, devfn;
2585
2586 iommu = device_to_iommu(dev, &bus, &devfn);
2587 if (!iommu)
2588 return NULL;
2589
2590 req_id = ((u16)bus << 8) | devfn;
2591
2592 if (dev_is_pci(dev)) {
2593 struct pci_dev *pdev = to_pci_dev(dev);
2594
2595 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2596
2597 /* register PCI DMA alias device */
2598 if (req_id != dma_alias) {
2599 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2600 dma_alias & 0xff, NULL, domain);
2601
2602 if (!tmp || tmp != domain)
2603 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002604 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002605 }
2606
Joerg Roedel5db31562015-07-22 12:40:43 +02002607 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
Joerg Roedel76208352016-08-25 14:25:12 +02002608 if (!tmp || tmp != domain)
2609 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002610
Joerg Roedel76208352016-08-25 14:25:12 +02002611 return domain;
2612}
2613
2614static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2615{
2616 struct dmar_domain *domain, *tmp;
2617
2618 domain = find_domain(dev);
2619 if (domain)
2620 goto out;
2621
2622 domain = find_or_alloc_domain(dev, gaw);
2623 if (!domain)
2624 goto out;
2625
2626 tmp = set_domain_for_dev(dev, domain);
2627 if (!tmp || domain != tmp) {
Alex Williamson579305f2014-07-03 09:51:43 -06002628 domain_exit(domain);
2629 domain = tmp;
2630 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002631
Joerg Roedel76208352016-08-25 14:25:12 +02002632out:
2633
David Woodhouseb718cd32014-03-09 13:11:33 -07002634 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002635}
2636
David Woodhouseb2132032009-06-26 18:50:28 +01002637static int iommu_domain_identity_map(struct dmar_domain *domain,
2638 unsigned long long start,
2639 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002640{
David Woodhousec5395d52009-06-28 16:35:56 +01002641 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2642 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002643
David Woodhousec5395d52009-06-28 16:35:56 +01002644 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2645 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002646 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002647 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002648 }
2649
Joerg Roedelaf1089c2015-07-21 15:45:19 +02002650 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002651 /*
2652 * RMRR range might have overlap with physical memory range,
2653 * clear it first
2654 */
David Woodhousec5395d52009-06-28 16:35:56 +01002655 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002656
David Woodhousec5395d52009-06-28 16:35:56 +01002657 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2658 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002659 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002660}
2661
Joerg Roedeld66ce542015-09-23 19:00:10 +02002662static int domain_prepare_identity_map(struct device *dev,
2663 struct dmar_domain *domain,
2664 unsigned long long start,
2665 unsigned long long end)
David Woodhouseb2132032009-06-26 18:50:28 +01002666{
David Woodhouse19943b02009-08-04 16:19:20 +01002667 /* For _hardware_ passthrough, don't bother. But for software
2668 passthrough, we do it anyway -- it may indicate a memory
 2669 range which is reserved in E820 and so didn't get set
 2670 up in si_domain to start with */
2671 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002672 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2673 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002674 return 0;
2675 }
2676
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002677 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2678 dev_name(dev), start, end);
2679
David Woodhouse5595b522009-12-02 09:21:55 +00002680 if (end < start) {
2681 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2682 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2683 dmi_get_system_info(DMI_BIOS_VENDOR),
2684 dmi_get_system_info(DMI_BIOS_VERSION),
2685 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002686 return -EIO;
David Woodhouse5595b522009-12-02 09:21:55 +00002687 }
2688
David Woodhouse2ff729f2009-08-26 14:25:41 +01002689 if (end >> agaw_to_width(domain->agaw)) {
2690 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2691 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2692 agaw_to_width(domain->agaw),
2693 dmi_get_system_info(DMI_BIOS_VENDOR),
2694 dmi_get_system_info(DMI_BIOS_VERSION),
2695 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002696 return -EIO;
David Woodhouse2ff729f2009-08-26 14:25:41 +01002697 }
David Woodhouse19943b02009-08-04 16:19:20 +01002698
Joerg Roedeld66ce542015-09-23 19:00:10 +02002699 return iommu_domain_identity_map(domain, start, end);
2700}
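/*
 * Worked example for the address-width check above (illustrative
 * only): assume domain->agaw corresponds to a 39-bit address width, so
 * agaw_to_width(domain->agaw) == 39.  Any RMRR whose end address is at
 * or above 1ULL << 39 (512 GiB) makes "end >> 39" non-zero, triggers
 * the WARN and the RMRR is rejected with -EIO, since the domain's page
 * tables simply cannot cover it.
 */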
2701
2702static int iommu_prepare_identity_map(struct device *dev,
2703 unsigned long long start,
2704 unsigned long long end)
2705{
2706 struct dmar_domain *domain;
2707 int ret;
2708
2709 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2710 if (!domain)
2711 return -ENOMEM;
2712
2713 ret = domain_prepare_identity_map(dev, domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002714 if (ret)
Joerg Roedeld66ce542015-09-23 19:00:10 +02002715 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002716
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002717 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002718}
2719
2720static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002721 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002722{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002723 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002724 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002725 return iommu_prepare_identity_map(dev, rmrr->base_address,
2726 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002727}
2728
Suresh Siddhad3f13812011-08-23 17:05:25 -07002729#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002730static inline void iommu_prepare_isa(void)
2731{
2732 struct pci_dev *pdev;
2733 int ret;
2734
2735 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2736 if (!pdev)
2737 return;
2738
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002739 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002740 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002741
2742 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002743 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002744
Yijing Wang9b27e822014-05-20 20:37:52 +08002745 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002746}
2747#else
2748static inline void iommu_prepare_isa(void)
2749{
2750 return;
2751}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002752#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002753
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002754static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002755
Matt Kraai071e1372009-08-23 22:30:22 -07002756static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002757{
David Woodhousec7ab48d2009-06-26 19:10:36 +01002758 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002759
Jiang Liuab8dfe22014-07-11 14:19:27 +08002760 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002761 if (!si_domain)
2762 return -EFAULT;
2763
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002764 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2765 domain_exit(si_domain);
2766 return -EFAULT;
2767 }
2768
Joerg Roedel0dc79712015-07-21 15:40:06 +02002769 pr_debug("Identity mapping domain allocated\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002770
David Woodhouse19943b02009-08-04 16:19:20 +01002771 if (hw)
2772 return 0;
2773
David Woodhousec7ab48d2009-06-26 19:10:36 +01002774 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002775 unsigned long start_pfn, end_pfn;
2776 int i;
2777
2778 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2779 ret = iommu_domain_identity_map(si_domain,
2780 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2781 if (ret)
2782 return ret;
2783 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002784 }
2785
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002786 return 0;
2787}
2788
David Woodhouse9b226622014-03-09 14:03:28 -07002789static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002790{
2791 struct device_domain_info *info;
2792
2793 if (likely(!iommu_identity_mapping))
2794 return 0;
2795
David Woodhouse9b226622014-03-09 14:03:28 -07002796 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002797 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2798 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002799
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002800 return 0;
2801}
2802
Joerg Roedel28ccce02015-07-21 14:45:31 +02002803static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002804{
David Woodhouse0ac72662014-03-09 13:19:22 -07002805 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002806 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002807 u8 bus, devfn;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002808
David Woodhouse5913c9b2014-03-09 16:27:31 -07002809 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002810 if (!iommu)
2811 return -ENODEV;
2812
Joerg Roedel5db31562015-07-22 12:40:43 +02002813 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002814 if (ndomain != domain)
2815 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002816
2817 return 0;
2818}
2819
David Woodhouse0b9d9752014-03-09 15:48:15 -07002820static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002821{
2822 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002823 struct device *tmp;
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002824 int i;
2825
Jiang Liu0e2426122014-02-19 14:07:34 +08002826 rcu_read_lock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002827 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002828 /*
2829 * Return TRUE if this RMRR contains the device that
2830 * is passed in.
2831 */
2832 for_each_active_dev_scope(rmrr->devices,
2833 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002834 if (tmp == dev) {
Jiang Liu0e2426122014-02-19 14:07:34 +08002835 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002836 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002837 }
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002838 }
Jiang Liu0e2426122014-02-19 14:07:34 +08002839 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002840 return false;
2841}
2842
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002843/*
 2844 * There are a couple of cases where we need to restrict the functionality of
2845 * devices associated with RMRRs. The first is when evaluating a device for
2846 * identity mapping because problems exist when devices are moved in and out
2847 * of domains and their respective RMRR information is lost. This means that
2848 * a device with associated RMRRs will never be in a "passthrough" domain.
2849 * The second is use of the device through the IOMMU API. This interface
2850 * expects to have full control of the IOVA space for the device. We cannot
2851 * satisfy both the requirement that RMRR access is maintained and have an
2852 * unencumbered IOVA space. We also have no ability to quiesce the device's
2853 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2854 * We therefore prevent devices associated with an RMRR from participating in
2855 * the IOMMU API, which eliminates them from device assignment.
2856 *
2857 * In both cases we assume that PCI USB devices with RMRRs have them largely
2858 * for historical reasons and that the RMRR space is not actively used post
2859 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002860 *
2861 * The same exception is made for graphics devices, with the requirement that
2862 * any use of the RMRR regions will be torn down before assigning the device
2863 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002864 */
2865static bool device_is_rmrr_locked(struct device *dev)
2866{
2867 if (!device_has_rmrr(dev))
2868 return false;
2869
2870 if (dev_is_pci(dev)) {
2871 struct pci_dev *pdev = to_pci_dev(dev);
2872
David Woodhouse18436af2015-03-25 15:05:47 +00002873 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002874 return false;
2875 }
2876
2877 return true;
2878}
2879
David Woodhouse3bdb2592014-03-09 16:03:08 -07002880static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002881{
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002882
David Woodhouse3bdb2592014-03-09 16:03:08 -07002883 if (dev_is_pci(dev)) {
2884 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002885
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002886 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002887 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002888
David Woodhouse3bdb2592014-03-09 16:03:08 -07002889 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2890 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002891
David Woodhouse3bdb2592014-03-09 16:03:08 -07002892 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2893 return 1;
2894
2895 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2896 return 0;
2897
2898 /*
2899 * We want to start off with all devices in the 1:1 domain, and
2900 * take them out later if we find they can't access all of memory.
2901 *
2902 * However, we can't do this for PCI devices behind bridges,
2903 * because all PCI devices behind the same bridge will end up
2904 * with the same source-id on their transactions.
2905 *
2906 * Practically speaking, we can't change things around for these
2907 * devices at run-time, because we can't be sure there'll be no
2908 * DMA transactions in flight for any of their siblings.
2909 *
2910 * So PCI devices (unless they're on the root bus) as well as
2911 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2912 * the 1:1 domain, just in _case_ one of their siblings turns out
2913 * not to be able to map all of memory.
2914 */
2915 if (!pci_is_pcie(pdev)) {
2916 if (!pci_is_root_bus(pdev->bus))
2917 return 0;
2918 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2919 return 0;
2920 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2921 return 0;
2922 } else {
2923 if (device_has_rmrr(dev))
2924 return 0;
2925 }
David Woodhouse6941af22009-07-04 18:24:27 +01002926
David Woodhouse3dfc8132009-07-04 19:11:08 +01002927 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002928 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002929 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002930 * take them out of the 1:1 domain later.
2931 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002932 if (!startup) {
2933 /*
2934 * If the device's dma_mask is less than the system's memory
2935 * size then this is not a candidate for identity mapping.
2936 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002937 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002938
David Woodhouse3bdb2592014-03-09 16:03:08 -07002939 if (dev->coherent_dma_mask &&
2940 dev->coherent_dma_mask < dma_mask)
2941 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002942
David Woodhouse3bdb2592014-03-09 16:03:08 -07002943 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002944 }
David Woodhouse6941af22009-07-04 18:24:27 +01002945
2946 return 1;
2947}
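/*
 * Illustrative example for the startup == 0 branch above (not driver
 * code): on a machine with 8 GiB of RAM, dma_get_required_mask()
 * evaluates to roughly DMA_BIT_MASK(33).  A device advertising only a
 * 32-bit dma_mask then fails "dma_mask >= dma_get_required_mask(dev)"
 * and is not (or no longer) identity mapped, while a device with a
 * 64-bit dma_mask passes and stays in the 1:1 domain.
 */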
2948
David Woodhousecf04eee2014-03-21 16:49:04 +00002949static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2950{
2951 int ret;
2952
2953 if (!iommu_should_identity_map(dev, 1))
2954 return 0;
2955
Joerg Roedel28ccce02015-07-21 14:45:31 +02002956 ret = domain_add_dev_info(si_domain, dev);
David Woodhousecf04eee2014-03-21 16:49:04 +00002957 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002958 pr_info("%s identity mapping for device %s\n",
2959 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002960 else if (ret == -ENODEV)
2961 /* device not associated with an iommu */
2962 ret = 0;
2963
2964 return ret;
2965}
2966
2967
Matt Kraai071e1372009-08-23 22:30:22 -07002968static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002969{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002970 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002971 struct dmar_drhd_unit *drhd;
2972 struct intel_iommu *iommu;
2973 struct device *dev;
2974 int i;
2975 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002976
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002977 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002978 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2979 if (ret)
2980 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002981 }
2982
David Woodhousecf04eee2014-03-21 16:49:04 +00002983 for_each_active_iommu(iommu, drhd)
2984 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2985 struct acpi_device_physical_node *pn;
2986 struct acpi_device *adev;
2987
2988 if (dev->bus != &acpi_bus_type)
2989 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002990
David Woodhousecf04eee2014-03-21 16:49:04 +00002991 adev = to_acpi_device(dev);
2992 mutex_lock(&adev->physical_node_lock);
2993 list_for_each_entry(pn, &adev->physical_node_list, node) {
2994 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2995 if (ret)
2996 break;
2997 }
2998 mutex_unlock(&adev->physical_node_lock);
2999 if (ret)
3000 return ret;
3001 }
3002
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003003 return 0;
3004}
3005
Jiang Liuffebeb42014-11-09 22:48:02 +08003006static void intel_iommu_init_qi(struct intel_iommu *iommu)
3007{
3008 /*
 3009 * Start from a sane IOMMU hardware state.
 3010 * If queued invalidation was already initialized by us
 3011 * (for example, while enabling interrupt remapping) then
 3012 * things are already rolling from a sane state.
3013 */
3014 if (!iommu->qi) {
3015 /*
3016 * Clear any previous faults.
3017 */
3018 dmar_fault(-1, iommu);
3019 /*
3020 * Disable queued invalidation if supported and already enabled
3021 * before OS handover.
3022 */
3023 dmar_disable_qi(iommu);
3024 }
3025
3026 if (dmar_enable_qi(iommu)) {
3027 /*
3028 * Queued Invalidate not enabled, use Register Based Invalidate
3029 */
3030 iommu->flush.flush_context = __iommu_flush_context;
3031 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003032 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003033 iommu->name);
3034 } else {
3035 iommu->flush.flush_context = qi_flush_context;
3036 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003037 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08003038 }
3039}
3040
Joerg Roedel091d42e2015-06-12 11:56:10 +02003041static int copy_context_table(struct intel_iommu *iommu,
Dan Williamsdfddb962015-10-09 18:16:46 -04003042 struct root_entry *old_re,
Joerg Roedel091d42e2015-06-12 11:56:10 +02003043 struct context_entry **tbl,
3044 int bus, bool ext)
3045{
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003046 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003047 struct context_entry *new_ce = NULL, ce;
Dan Williamsdfddb962015-10-09 18:16:46 -04003048 struct context_entry *old_ce = NULL;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003049 struct root_entry re;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003050 phys_addr_t old_ce_phys;
3051
3052 tbl_idx = ext ? bus * 2 : bus;
Dan Williamsdfddb962015-10-09 18:16:46 -04003053 memcpy(&re, old_re, sizeof(re));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003054
3055 for (devfn = 0; devfn < 256; devfn++) {
3056 /* First calculate the correct index */
3057 idx = (ext ? devfn * 2 : devfn) % 256;
3058
3059 if (idx == 0) {
3060 /* First save what we may have and clean up */
3061 if (new_ce) {
3062 tbl[tbl_idx] = new_ce;
3063 __iommu_flush_cache(iommu, new_ce,
3064 VTD_PAGE_SIZE);
3065 pos = 1;
3066 }
3067
3068 if (old_ce)
3069 iounmap(old_ce);
3070
3071 ret = 0;
3072 if (devfn < 0x80)
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003073 old_ce_phys = root_entry_lctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003074 else
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003075 old_ce_phys = root_entry_uctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003076
3077 if (!old_ce_phys) {
3078 if (ext && devfn == 0) {
3079 /* No LCTP, try UCTP */
3080 devfn = 0x7f;
3081 continue;
3082 } else {
3083 goto out;
3084 }
3085 }
3086
3087 ret = -ENOMEM;
Dan Williamsdfddb962015-10-09 18:16:46 -04003088 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3089 MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003090 if (!old_ce)
3091 goto out;
3092
3093 new_ce = alloc_pgtable_page(iommu->node);
3094 if (!new_ce)
3095 goto out_unmap;
3096
3097 ret = 0;
3098 }
3099
3100 /* Now copy the context entry */
Dan Williamsdfddb962015-10-09 18:16:46 -04003101 memcpy(&ce, old_ce + idx, sizeof(ce));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003102
Joerg Roedelcf484d02015-06-12 12:21:46 +02003103 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02003104 continue;
3105
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003106 did = context_domain_id(&ce);
3107 if (did >= 0 && did < cap_ndoms(iommu->cap))
3108 set_bit(did, iommu->domain_ids);
3109
Joerg Roedelcf484d02015-06-12 12:21:46 +02003110 /*
3111 * We need a marker for copied context entries. This
3112 * marker needs to work for the old format as well as
3113 * for extended context entries.
3114 *
3115 * Bit 67 of the context entry is used. In the old
3116 * format this bit is available to software, in the
3117 * extended format it is the PGE bit, but PGE is ignored
3118 * by HW if PASIDs are disabled (and thus still
3119 * available).
3120 *
3121 * So disable PASIDs first and then mark the entry
3122 * copied. This means that we don't copy PASID
3123 * translations from the old kernel, but this is fine as
3124 * faults there are not fatal.
3125 */
3126 context_clear_pasid_enable(&ce);
3127 context_set_copied(&ce);
3128
Joerg Roedel091d42e2015-06-12 11:56:10 +02003129 new_ce[idx] = ce;
3130 }
3131
3132 tbl[tbl_idx + pos] = new_ce;
3133
3134 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3135
3136out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003137 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003138
3139out:
3140 return ret;
3141}
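/*
 * Illustrative index arithmetic for copy_context_table() above (not
 * driver code): with extended context entries (ext == true) each bus
 * gets two copied tables, tbl[bus * 2] built from the lower context
 * table pointer (LCTP) and tbl[bus * 2 + 1] from the upper one (UCTP).
 * Within a table, devfn 0x03 lands at idx = (0x03 * 2) % 256 = 6; devfn
 * 0x83 also computes idx = (0x83 * 2) % 256 = 6, but ends up in the
 * upper table because idx wraps to 0 at devfn 0x80 and the code then
 * switches to root_entry_uctp().
 */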
3142
3143static int copy_translation_tables(struct intel_iommu *iommu)
3144{
3145 struct context_entry **ctxt_tbls;
Dan Williamsdfddb962015-10-09 18:16:46 -04003146 struct root_entry *old_rt;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003147 phys_addr_t old_rt_phys;
3148 int ctxt_table_entries;
3149 unsigned long flags;
3150 u64 rtaddr_reg;
3151 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02003152 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003153
3154 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3155 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02003156 new_ext = !!ecap_ecs(iommu->ecap);
3157
3158 /*
3159 * The RTT bit can only be changed when translation is disabled,
 3160 * but disabling translation would open a window for data
3161 * corruption. So bail out and don't copy anything if we would
3162 * have to change the bit.
3163 */
3164 if (new_ext != ext)
3165 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003166
3167 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3168 if (!old_rt_phys)
3169 return -EINVAL;
3170
Dan Williamsdfddb962015-10-09 18:16:46 -04003171 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003172 if (!old_rt)
3173 return -ENOMEM;
3174
3175 /* This is too big for the stack - allocate it from slab */
3176 ctxt_table_entries = ext ? 512 : 256;
3177 ret = -ENOMEM;
3178 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3179 if (!ctxt_tbls)
3180 goto out_unmap;
3181
3182 for (bus = 0; bus < 256; bus++) {
3183 ret = copy_context_table(iommu, &old_rt[bus],
3184 ctxt_tbls, bus, ext);
3185 if (ret) {
3186 pr_err("%s: Failed to copy context table for bus %d\n",
3187 iommu->name, bus);
3188 continue;
3189 }
3190 }
3191
3192 spin_lock_irqsave(&iommu->lock, flags);
3193
3194 /* Context tables are copied, now write them to the root_entry table */
3195 for (bus = 0; bus < 256; bus++) {
3196 int idx = ext ? bus * 2 : bus;
3197 u64 val;
3198
3199 if (ctxt_tbls[idx]) {
3200 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3201 iommu->root_entry[bus].lo = val;
3202 }
3203
3204 if (!ext || !ctxt_tbls[idx + 1])
3205 continue;
3206
3207 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3208 iommu->root_entry[bus].hi = val;
3209 }
3210
3211 spin_unlock_irqrestore(&iommu->lock, flags);
3212
3213 kfree(ctxt_tbls);
3214
3215 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3216
3217 ret = 0;
3218
3219out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003220 memunmap(old_rt);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003221
3222 return ret;
3223}
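/*
 * Note on the root-entry update above (illustrative): the expression
 * "virt_to_phys(ctxt_tbls[idx]) | 1" stores the physical address of
 * the copied context table with bit 0 set, which is the Present bit of
 * a VT-d root entry.  For example, a context table at physical address
 * 0x12345000 becomes root_entry.lo == 0x12345001, so the hardware
 * treats the copied table as valid once the root table is flushed.
 */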
3224
Joseph Cihulab7792602011-05-03 00:08:37 -07003225static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003226{
3227 struct dmar_drhd_unit *drhd;
3228 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003229 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003230 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003231 struct intel_iommu *iommu;
Joerg Roedel13cf0172017-08-11 11:40:10 +02003232 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003233
3234 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003235 * for each drhd
3236 * allocate root
3237 * initialize and program root entry to not present
3238 * endfor
3239 */
3240 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003241 /*
 3242 * lock not needed as this is only incremented in the single-
 3243 * threaded kernel __init code path; all other accesses are
 3244 * read only
3245 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003246 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003247 g_num_of_iommus++;
3248 continue;
3249 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003250 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003251 }
3252
Jiang Liuffebeb42014-11-09 22:48:02 +08003253 /* Preallocate enough resources for IOMMU hot-addition */
3254 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3255 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3256
Weidong Hand9630fe2008-12-08 11:06:32 +08003257 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3258 GFP_KERNEL);
3259 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003260 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003261 ret = -ENOMEM;
3262 goto error;
3263 }
3264
Jiang Liu7c919772014-01-06 14:18:18 +08003265 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003266 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003267
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003268 intel_iommu_init_qi(iommu);
3269
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003270 ret = iommu_init_domains(iommu);
3271 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003272 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003273
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003274 init_translation_status(iommu);
3275
Joerg Roedel091d42e2015-06-12 11:56:10 +02003276 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3277 iommu_disable_translation(iommu);
3278 clear_translation_pre_enabled(iommu);
3279 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3280 iommu->name);
3281 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003282
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003283 /*
3284 * TBD:
3285 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003286 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003287 */
3288 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003289 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003290 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003291
Joerg Roedel091d42e2015-06-12 11:56:10 +02003292 if (translation_pre_enabled(iommu)) {
3293 pr_info("Translation already enabled - trying to copy translation structures\n");
3294
3295 ret = copy_translation_tables(iommu);
3296 if (ret) {
3297 /*
3298 * We found the IOMMU with translation
3299 * enabled - but failed to copy over the
3300 * old root-entry table. Try to proceed
3301 * by disabling translation now and
3302 * allocating a clean root-entry table.
3303 * This might cause DMAR faults, but
3304 * probably the dump will still succeed.
3305 */
3306 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3307 iommu->name);
3308 iommu_disable_translation(iommu);
3309 clear_translation_pre_enabled(iommu);
3310 } else {
3311 pr_info("Copied translation tables from previous kernel for %s\n",
3312 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003313 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003314 }
3315 }
3316
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003317 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003318 hw_pass_through = 0;
David Woodhouse8a94ade2015-03-24 14:54:56 +00003319#ifdef CONFIG_INTEL_IOMMU_SVM
3320 if (pasid_enabled(iommu))
3321 intel_svm_alloc_pasid_tables(iommu);
3322#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003323 }
3324
Joerg Roedela4c34ff2016-06-17 11:29:48 +02003325 /*
3326 * Now that qi is enabled on all iommus, set the root entry and flush
3327 * caches. This is required on some Intel X58 chipsets, otherwise the
3328 * flush_context function will loop forever and the boot hangs.
3329 */
3330 for_each_active_iommu(iommu, drhd) {
3331 iommu_flush_write_buffer(iommu);
3332 iommu_set_root_entry(iommu);
3333 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3334 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3335 }
3336
David Woodhouse19943b02009-08-04 16:19:20 +01003337 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003338 iommu_identity_mapping |= IDENTMAP_ALL;
3339
Suresh Siddhad3f13812011-08-23 17:05:25 -07003340#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003341 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003342#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003343
Ashok Raj21e722c2017-01-30 09:39:53 -08003344 check_tylersburg_isoch();
3345
Joerg Roedel86080cc2015-06-12 12:27:16 +02003346 if (iommu_identity_mapping) {
3347 ret = si_domain_init(hw_pass_through);
3348 if (ret)
3349 goto free_iommu;
3350 }
3351
David Woodhousee0fc7e02009-09-30 09:12:17 -07003352
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003353 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003354 * If we copied translations from a previous kernel in the kdump
3355 * case, we can not assign the devices to domains now, as that
3356 * would eliminate the old mappings. So skip this part and defer
3357 * the assignment to device driver initialization time.
3358 */
3359 if (copied_tables)
3360 goto domains_done;
3361
3362 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003363 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003364 * identity mappings for rmrr, gfx, and isa and may fall back to static
3365 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003366 */
David Woodhouse19943b02009-08-04 16:19:20 +01003367 if (iommu_identity_mapping) {
3368 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3369 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003370 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003371 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003372 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003373 }
David Woodhouse19943b02009-08-04 16:19:20 +01003374 /*
3375 * For each rmrr
3376 * for each dev attached to rmrr
3377 * do
3378 * locate drhd for dev, alloc domain for dev
3379 * allocate free domain
3380 * allocate page table entries for rmrr
3381 * if context not allocated for bus
3382 * allocate and init context
3383 * set present in root table for this bus
3384 * init context with domain, translation etc
3385 * endfor
3386 * endfor
3387 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003388 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003389 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003390 /* some BIOSes list non-existent devices in the DMAR table. */
3391 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003392 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003393 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003394 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003395 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003396 }
3397 }
3398
3399 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003400
Joerg Roedela87f4912015-06-12 12:32:54 +02003401domains_done:
3402
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003403 /*
3404 * for each drhd
3405 * enable fault log
3406 * global invalidate context cache
3407 * global invalidate iotlb
3408 * enable translation
3409 */
Jiang Liu7c919772014-01-06 14:18:18 +08003410 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003411 if (drhd->ignored) {
3412 /*
3413 * we always have to disable PMRs or DMA may fail on
3414 * this device
3415 */
3416 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003417 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003418 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003419 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003420
3421 iommu_flush_write_buffer(iommu);
3422
David Woodhousea222a7f2015-10-07 23:35:18 +01003423#ifdef CONFIG_INTEL_IOMMU_SVM
3424 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3425 ret = intel_svm_enable_prq(iommu);
3426 if (ret)
3427 goto free_iommu;
3428 }
3429#endif
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003430 ret = dmar_set_interrupt(iommu);
3431 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003432 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003433
Joerg Roedel8939ddf2015-06-12 14:40:01 +02003434 if (!translation_pre_enabled(iommu))
3435 iommu_enable_translation(iommu);
3436
David Woodhouseb94996c2009-09-19 15:28:12 -07003437 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003438 }
3439
3440 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003441
3442free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003443 for_each_active_iommu(iommu, drhd) {
3444 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003445 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003446 }
Joerg Roedel13cf0172017-08-11 11:40:10 +02003447
Weidong Hand9630fe2008-12-08 11:06:32 +08003448 kfree(g_iommus);
Joerg Roedel13cf0172017-08-11 11:40:10 +02003449
Jiang Liu989d51f2014-02-19 14:07:21 +08003450error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003451 return ret;
3452}
3453
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003454/* This takes a number of _MM_ pages, not VTD pages */
Omer Peleg2aac6302016-04-20 11:33:57 +03003455static unsigned long intel_alloc_iova(struct device *dev,
David Woodhouse875764d2009-06-28 21:20:51 +01003456 struct dmar_domain *domain,
3457 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003458{
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003459 unsigned long iova_pfn = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003460
David Woodhouse875764d2009-06-28 21:20:51 +01003461 /* Restrict dma_mask to the width that the iommu can handle */
3462 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
Robin Murphy8f6429c2015-07-16 19:40:12 +01003463 /* Ensure we reserve the whole size-aligned region */
3464 nrpages = __roundup_pow_of_two(nrpages);
David Woodhouse875764d2009-06-28 21:20:51 +01003465
3466 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003467 /*
3468 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003469 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003470 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003471 */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003472 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003473 IOVA_PFN(DMA_BIT_MASK(32)), false);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003474 if (iova_pfn)
3475 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003476 }
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003477 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3478 IOVA_PFN(dma_mask), true);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003479 if (unlikely(!iova_pfn)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003480 pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003481 nrpages, dev_name(dev));
Omer Peleg2aac6302016-04-20 11:33:57 +03003482 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003483 }
3484
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003485 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003486}
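/*
 * Illustrative example for intel_alloc_iova() above (not driver code):
 * a request for 3 pages is rounded up to 4 by __roundup_pow_of_two()
 * so that the size-aligned IOVA reservation covers the whole region.
 * For a device with a 64-bit dma_mask the allocator first tries to
 * place those 4 pages below 4 GiB (DMA_BIT_MASK(32)), and only falls
 * back to the full mask when that range is exhausted.
 */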
3487
Peter Xub316d022017-05-22 18:28:51 +08003488static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003489{
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003490 struct dmar_domain *domain, *tmp;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003491 struct dmar_rmrr_unit *rmrr;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003492 struct device *i_dev;
3493 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003494
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003495 domain = find_domain(dev);
3496 if (domain)
3497 goto out;
3498
3499 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3500 if (!domain)
3501 goto out;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003502
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003503 /* We have a new domain - setup possible RMRRs for the device */
3504 rcu_read_lock();
3505 for_each_rmrr_units(rmrr) {
3506 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3507 i, i_dev) {
3508 if (i_dev != dev)
3509 continue;
3510
3511 ret = domain_prepare_identity_map(dev, domain,
3512 rmrr->base_address,
3513 rmrr->end_address);
3514 if (ret)
3515 dev_err(dev, "Mapping reserved region failed\n");
3516 }
3517 }
3518 rcu_read_unlock();
3519
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003520 tmp = set_domain_for_dev(dev, domain);
3521 if (!tmp || domain != tmp) {
3522 domain_exit(domain);
3523 domain = tmp;
3524 }
3525
3526out:
3527
3528 if (!domain)
3529 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3530
3531
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003532 return domain;
3533}
3534
David Woodhouseecb509e2014-03-09 16:29:55 -07003535/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01003536static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003537{
3538 int found;
3539
David Woodhouse3d891942014-03-06 15:59:26 +00003540 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003541 return 1;
3542
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003543 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003544 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003545
David Woodhouse9b226622014-03-09 14:03:28 -07003546 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003547 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003548 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003549 return 1;
3550 else {
3551 /*
 3552 * The 32-bit DMA device is removed from si_domain and falls
 3553 * back to non-identity mapping.
3554 */
Joerg Roedele6de0f82015-07-22 16:30:36 +02003555 dmar_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003556 pr_info("32bit %s uses non-identity mapping\n",
3557 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003558 return 0;
3559 }
3560 } else {
3561 /*
 3562 * In case a 64-bit DMA device is detached from a VM, the device
 3563 * is put into si_domain for identity mapping.
3564 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003565 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003566 int ret;
Joerg Roedel28ccce02015-07-21 14:45:31 +02003567 ret = domain_add_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003568 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003569 pr_info("64bit %s uses identity mapping\n",
3570 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003571 return 1;
3572 }
3573 }
3574 }
3575
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003576 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003577}
3578
David Woodhouse5040a912014-03-09 16:14:00 -07003579static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003580 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003581{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003582 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003583 phys_addr_t start_paddr;
Omer Peleg2aac6302016-04-20 11:33:57 +03003584 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003585 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003586 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003587 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003588 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003589
3590 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003591
David Woodhouse5040a912014-03-09 16:14:00 -07003592 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003593 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003594
David Woodhouse5040a912014-03-09 16:14:00 -07003595 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003596 if (!domain)
3597 return 0;
3598
Weidong Han8c11e792008-12-08 15:29:22 +08003599 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003600 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003601
Omer Peleg2aac6302016-04-20 11:33:57 +03003602 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3603 if (!iova_pfn)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003604 goto error;
3605
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003606 /*
3607 * Check if DMAR supports zero-length reads on write only
3608 * mappings..
3609 */
 3610 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003611 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003612 prot |= DMA_PTE_READ;
3613 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3614 prot |= DMA_PTE_WRITE;
3615 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003616 * paddr .. (paddr + size) might span a partial page, so we map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003617 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003618 * might have two guest addresses mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003619 * is not a big problem
3620 */
Omer Peleg2aac6302016-04-20 11:33:57 +03003621 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003622 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003623 if (ret)
3624 goto error;
3625
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003626 /* it's a non-present to present mapping. Only flush if caching mode */
3627 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003628 iommu_flush_iotlb_psi(iommu, domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003629 mm_to_dma_pfn(iova_pfn),
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003630 size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003631 else
Weidong Han8c11e792008-12-08 15:29:22 +08003632 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003633
Omer Peleg2aac6302016-04-20 11:33:57 +03003634 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
David Woodhouse03d6a242009-06-28 15:33:46 +01003635 start_paddr += paddr & ~PAGE_MASK;
3636 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003637
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003638error:
Omer Peleg2aac6302016-04-20 11:33:57 +03003639 if (iova_pfn)
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003640 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003641 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003642 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003643 return 0;
3644}
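/*
 * Illustrative example for __intel_map_single() above (not driver
 * code): mapping paddr 0x12345678 with size 0x200 gives
 * aligned_nrpages() == 1, so the whole page 0x12345000..0x12345fff is
 * mapped to a single IOVA page and the returned handle is
 *
 *	(iova_pfn << PAGE_SHIFT) + (0x12345678 & ~PAGE_MASK)
 *		== iova_base + 0x678
 *
 * i.e. the page offset of the original buffer is preserved.
 */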
3645
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003646static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3647 unsigned long offset, size_t size,
3648 enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003649 unsigned long attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003650{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003651 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003652 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003653}
3654
Omer Peleg769530e2016-04-20 11:33:25 +03003655static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003656{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003657 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003658 unsigned long start_pfn, last_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003659 unsigned long nrpages;
Omer Peleg2aac6302016-04-20 11:33:57 +03003660 unsigned long iova_pfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003661 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003662 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003663
David Woodhouse73676832009-07-04 14:08:36 +01003664 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003665 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003666
David Woodhouse1525a292014-03-06 16:19:30 +00003667 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003668 BUG_ON(!domain);
3669
Weidong Han8c11e792008-12-08 15:29:22 +08003670 iommu = domain_get_iommu(domain);
3671
Omer Peleg2aac6302016-04-20 11:33:57 +03003672 iova_pfn = IOVA_PFN(dev_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003673
Omer Peleg769530e2016-04-20 11:33:25 +03003674 nrpages = aligned_nrpages(dev_addr, size);
Omer Peleg2aac6302016-04-20 11:33:57 +03003675 start_pfn = mm_to_dma_pfn(iova_pfn);
Omer Peleg769530e2016-04-20 11:33:25 +03003676 last_pfn = start_pfn + nrpages - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003677
David Woodhoused794dc92009-06-28 00:27:49 +01003678 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003679 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003680
David Woodhouseea8ea462014-03-05 17:09:32 +00003681 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003682
mark gross5e0d2a62008-03-04 15:22:08 -08003683 if (intel_iommu_strict) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003684 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003685 nrpages, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003686 /* free iova */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003687 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
David Woodhouseea8ea462014-03-05 17:09:32 +00003688 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003689 } else {
Joerg Roedel13cf0172017-08-11 11:40:10 +02003690 queue_iova(&domain->iovad, iova_pfn, nrpages,
3691 (unsigned long)freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003692 /*
 3693 * queue up the release of the unmap to save roughly 1/6th of the
 3694 * CPU time used up by the IOTLB flush operation...
3695 */
mark gross5e0d2a62008-03-04 15:22:08 -08003696 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003697}
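/*
 * Illustrative sketch of the non-strict path above (not driver code):
 * queue_iova() stashes the freelist pointer as the per-entry data, so
 * the IOVA range and its page-table pages are only released after a
 * later, batched IOTLB flush.  Conceptually:
 *
 *	queue_iova(&domain->iovad, iova_pfn, nrpages,
 *		   (unsigned long)freelist);
 *	// ...later, from the flush-queue timer or per-cpu queue:
 *	//	flush the IOTLB for the whole queue,
 *	//	then free the IOVAs and dma_free_pagelist(freelist)
 *
 * which is the batching that the "save roughly 1/6th of the CPU"
 * comment refers to.
 */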
3698
Jiang Liud41a4ad2014-07-11 14:19:34 +08003699static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3700 size_t size, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003701 unsigned long attrs)
Jiang Liud41a4ad2014-07-11 14:19:34 +08003702{
Omer Peleg769530e2016-04-20 11:33:25 +03003703 intel_unmap(dev, dev_addr, size);
Jiang Liud41a4ad2014-07-11 14:19:34 +08003704}
3705
David Woodhouse5040a912014-03-09 16:14:00 -07003706static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003707 dma_addr_t *dma_handle, gfp_t flags,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003708 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003709{
Akinobu Mita36746432014-06-04 16:06:51 -07003710 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003711 int order;
3712
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003713 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003714 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003715
David Woodhouse5040a912014-03-09 16:14:00 -07003716 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003717 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003718 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3719 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003720 flags |= GFP_DMA;
3721 else
3722 flags |= GFP_DMA32;
3723 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003724
Mel Gormand0164ad2015-11-06 16:28:21 -08003725 if (gfpflags_allow_blocking(flags)) {
Akinobu Mita36746432014-06-04 16:06:51 -07003726 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003727
Lucas Stach712c6042017-02-24 14:58:44 -08003728 page = dma_alloc_from_contiguous(dev, count, order, flags);
Akinobu Mita36746432014-06-04 16:06:51 -07003729 if (page && iommu_no_mapping(dev) &&
3730 page_to_phys(page) + size > dev->coherent_dma_mask) {
3731 dma_release_from_contiguous(dev, page, count);
3732 page = NULL;
3733 }
3734 }
3735
3736 if (!page)
3737 page = alloc_pages(flags, order);
3738 if (!page)
3739 return NULL;
3740 memset(page_address(page), 0, size);
3741
3742 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003743 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003744 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003745 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003746 return page_address(page);
3747 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3748 __free_pages(page, order);
3749
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003750 return NULL;
3751}
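/*
 * Illustrative sketch (hypothetical caller, not part of this file): drivers
 * reach intel_alloc_coherent()/intel_free_coherent() through the generic DMA
 * API rather than calling them directly, roughly:
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... use the buffer for DMA ...
 *	dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
 */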
3752
David Woodhouse5040a912014-03-09 16:14:00 -07003753static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003754 dma_addr_t dma_handle, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003755{
3756 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003757 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003758
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003759 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003760 order = get_order(size);
3761
Omer Peleg769530e2016-04-20 11:33:25 +03003762 intel_unmap(dev, dma_handle, size);
Akinobu Mita36746432014-06-04 16:06:51 -07003763 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3764 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003765}
3766
David Woodhouse5040a912014-03-09 16:14:00 -07003767static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003768 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003769 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003770{
Omer Peleg769530e2016-04-20 11:33:25 +03003771 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3772 unsigned long nrpages = 0;
3773 struct scatterlist *sg;
3774 int i;
3775
3776 for_each_sg(sglist, sg, nelems, i) {
3777 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3778 }
3779
3780 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003781}
3782
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003783static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003784 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003785{
3786 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003787 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003788
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003789 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003790 BUG_ON(!sg_page(sg));
Robin Murphy29a90b72017-09-28 15:14:01 +01003791 sg->dma_address = sg_phys(sg);
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003792 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003793 }
3794 return nelems;
3795}
3796
David Woodhouse5040a912014-03-09 16:14:00 -07003797static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003798 enum dma_data_direction dir, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003799{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003800 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003801 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003802 size_t size = 0;
3803 int prot = 0;
Omer Peleg2aac6302016-04-20 11:33:57 +03003804 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003805 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003806 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003807 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003808 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003809
3810 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003811 if (iommu_no_mapping(dev))
3812 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003813
David Woodhouse5040a912014-03-09 16:14:00 -07003814 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003815 if (!domain)
3816 return 0;
3817
Weidong Han8c11e792008-12-08 15:29:22 +08003818 iommu = domain_get_iommu(domain);
3819
David Woodhouseb536d242009-06-28 14:49:31 +01003820 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003821 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003822
Omer Peleg2aac6302016-04-20 11:33:57 +03003823 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
David Woodhouse5040a912014-03-09 16:14:00 -07003824 *dev->dma_mask);
Omer Peleg2aac6302016-04-20 11:33:57 +03003825 if (!iova_pfn) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003826 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003827 return 0;
3828 }
3829
3830 /*
3831	 * Check if DMAR supports zero-length reads on write-only
3832	 * mappings.
3833 */
3834 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003835 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003836 prot |= DMA_PTE_READ;
3837 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3838 prot |= DMA_PTE_WRITE;
3839
Omer Peleg2aac6302016-04-20 11:33:57 +03003840 start_vpfn = mm_to_dma_pfn(iova_pfn);
David Woodhousee1605492009-06-29 11:17:38 +01003841
Fenghua Yuf5329592009-08-04 15:09:37 -07003842 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003843 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003844 dma_pte_free_pagetable(domain, start_vpfn,
David Dillowbc24c572017-06-28 19:42:23 -07003845 start_vpfn + size - 1,
3846 agaw_to_level(domain->agaw) + 1);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003847 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
David Woodhousee1605492009-06-29 11:17:38 +01003848 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003849 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003850
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003851 /* it's a non-present to present mapping. Only flush if caching mode */
3852 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003853 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003854 else
Weidong Han8c11e792008-12-08 15:29:22 +08003855 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003856
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003857 return nelems;
3858}
3859
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003860static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3861{
3862 return !dma_addr;
3863}
3864
Arvind Yadav01e19322017-06-28 16:39:32 +05303865const struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003866 .alloc = intel_alloc_coherent,
3867 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003868 .map_sg = intel_map_sg,
3869 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003870 .map_page = intel_map_page,
3871 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003872 .mapping_error = intel_mapping_error,
Christoph Hellwig5860acc2017-05-22 11:38:27 +02003873#ifdef CONFIG_X86
3874 .dma_supported = x86_dma_supported,
3875#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003876};
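/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * once dma_ops points at intel_dma_ops, streaming mappings made through the
 * generic DMA API are routed to the callbacks above, e.g. dma_map_single()
 * and dma_map_sg() end up in intel_map_page() and intel_map_sg():
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	... start the device's DMA against "handle" ...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */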
3877
3878static inline int iommu_domain_cache_init(void)
3879{
3880 int ret = 0;
3881
3882 iommu_domain_cache = kmem_cache_create("iommu_domain",
3883 sizeof(struct dmar_domain),
3884 0,
3885 SLAB_HWCACHE_ALIGN,
3887					 NULL);
3888 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003889 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003890 ret = -ENOMEM;
3891 }
3892
3893 return ret;
3894}
3895
3896static inline int iommu_devinfo_cache_init(void)
3897{
3898 int ret = 0;
3899
3900 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3901 sizeof(struct device_domain_info),
3902 0,
3903 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003904 NULL);
3905 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003906 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003907 ret = -ENOMEM;
3908 }
3909
3910 return ret;
3911}
3912
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003913static int __init iommu_init_mempool(void)
3914{
3915 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003916 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003917 if (ret)
3918 return ret;
3919
3920 ret = iommu_domain_cache_init();
3921 if (ret)
3922 goto domain_error;
3923
3924 ret = iommu_devinfo_cache_init();
3925 if (!ret)
3926 return ret;
3927
3928 kmem_cache_destroy(iommu_domain_cache);
3929domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003930 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003931
3932 return -ENOMEM;
3933}
3934
3935static void __init iommu_exit_mempool(void)
3936{
3937 kmem_cache_destroy(iommu_devinfo_cache);
3938 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003939 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003940}
3941
Dan Williams556ab452010-07-23 15:47:56 -07003942static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3943{
3944 struct dmar_drhd_unit *drhd;
3945 u32 vtbar;
3946 int rc;
3947
3948 /* We know that this device on this chipset has its own IOMMU.
3949 * If we find it under a different IOMMU, then the BIOS is lying
3950 * to us. Hope that the IOMMU for this device is actually
3951 * disabled, and it needs no translation...
3952 */
3953 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3954 if (rc) {
3955 /* "can't" happen */
3956 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3957 return;
3958 }
3959 vtbar &= 0xffff0000;
3960
3961	/* we know that this iommu should be at offset 0xa000 from vtbar */
3962 drhd = dmar_find_matched_drhd_unit(pdev);
3963 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3964 TAINT_FIRMWARE_WORKAROUND,
3965 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3966 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3967}
3968DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
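/*
 * Note: DECLARE_PCI_FIXUP_ENABLE() runs the quirk above from the
 * pci_enable_device() path, i.e. before the driver performs any DMA mapping,
 * so the DUMMY_DEVICE_DOMAIN_INFO marking takes effect early enough.
 */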
3969
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003970static void __init init_no_remapping_devices(void)
3971{
3972 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003973 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003974 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003975
3976 for_each_drhd_unit(drhd) {
3977 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003978 for_each_active_dev_scope(drhd->devices,
3979 drhd->devices_cnt, i, dev)
3980 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003981 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003982 if (i == drhd->devices_cnt)
3983 drhd->ignored = 1;
3984 }
3985 }
3986
Jiang Liu7c919772014-01-06 14:18:18 +08003987 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003988 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003989 continue;
3990
Jiang Liub683b232014-02-19 14:07:32 +08003991 for_each_active_dev_scope(drhd->devices,
3992 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003993 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003994 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003995 if (i < drhd->devices_cnt)
3996 continue;
3997
David Woodhousec0771df2011-10-14 20:59:46 +01003998 /* This IOMMU has *only* gfx devices. Either bypass it or
3999 set the gfx_mapped flag, as appropriate */
4000 if (dmar_map_gfx) {
4001 intel_iommu_gfx_mapped = 1;
4002 } else {
4003 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08004004 for_each_active_dev_scope(drhd->devices,
4005 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004006 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004007 }
4008 }
4009}
4010
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004011#ifdef CONFIG_SUSPEND
4012static int init_iommu_hw(void)
4013{
4014 struct dmar_drhd_unit *drhd;
4015 struct intel_iommu *iommu = NULL;
4016
4017 for_each_active_iommu(iommu, drhd)
4018 if (iommu->qi)
4019 dmar_reenable_qi(iommu);
4020
Joseph Cihulab7792602011-05-03 00:08:37 -07004021 for_each_iommu(iommu, drhd) {
4022 if (drhd->ignored) {
4023 /*
4024 * we always have to disable PMRs or DMA may fail on
4025 * this device
4026 */
4027 if (force_on)
4028 iommu_disable_protect_mem_regions(iommu);
4029 continue;
4030 }
4031
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004032 iommu_flush_write_buffer(iommu);
4033
4034 iommu_set_root_entry(iommu);
4035
4036 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004037 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08004038 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4039 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07004040 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004041 }
4042
4043 return 0;
4044}
4045
4046static void iommu_flush_all(void)
4047{
4048 struct dmar_drhd_unit *drhd;
4049 struct intel_iommu *iommu;
4050
4051 for_each_active_iommu(iommu, drhd) {
4052 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004053 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004054 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004055 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004056 }
4057}
4058
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004059static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004060{
4061 struct dmar_drhd_unit *drhd;
4062 struct intel_iommu *iommu = NULL;
4063 unsigned long flag;
4064
4065 for_each_active_iommu(iommu, drhd) {
4066 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4067 GFP_ATOMIC);
4068 if (!iommu->iommu_state)
4069 goto nomem;
4070 }
4071
4072 iommu_flush_all();
4073
4074 for_each_active_iommu(iommu, drhd) {
4075 iommu_disable_translation(iommu);
4076
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004077 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004078
4079 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4080 readl(iommu->reg + DMAR_FECTL_REG);
4081 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4082 readl(iommu->reg + DMAR_FEDATA_REG);
4083 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4084 readl(iommu->reg + DMAR_FEADDR_REG);
4085 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4086 readl(iommu->reg + DMAR_FEUADDR_REG);
4087
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004088 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004089 }
4090 return 0;
4091
4092nomem:
4093 for_each_active_iommu(iommu, drhd)
4094 kfree(iommu->iommu_state);
4095
4096 return -ENOMEM;
4097}
4098
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004099static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004100{
4101 struct dmar_drhd_unit *drhd;
4102 struct intel_iommu *iommu = NULL;
4103 unsigned long flag;
4104
4105 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004106 if (force_on)
4107 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4108 else
4109 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004110 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004111 }
4112
4113 for_each_active_iommu(iommu, drhd) {
4114
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004115 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004116
4117 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4118 iommu->reg + DMAR_FECTL_REG);
4119 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4120 iommu->reg + DMAR_FEDATA_REG);
4121 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4122 iommu->reg + DMAR_FEADDR_REG);
4123 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4124 iommu->reg + DMAR_FEUADDR_REG);
4125
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004126 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004127 }
4128
4129 for_each_active_iommu(iommu, drhd)
4130 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004131}
4132
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004133static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004134 .resume = iommu_resume,
4135 .suspend = iommu_suspend,
4136};
4137
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004138static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004139{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004140 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004141}
4142
4143#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004144static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004145#endif /* CONFIG_SUSPEND */
4146
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004147
Jiang Liuc2a0b532014-11-09 22:47:56 +08004148int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004149{
4150 struct acpi_dmar_reserved_memory *rmrr;
Eric Auger0659b8d2017-01-19 20:57:53 +00004151 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004152 struct dmar_rmrr_unit *rmrru;
Eric Auger0659b8d2017-01-19 20:57:53 +00004153 size_t length;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004154
4155 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4156 if (!rmrru)
Eric Auger0659b8d2017-01-19 20:57:53 +00004157 goto out;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004158
4159 rmrru->hdr = header;
4160 rmrr = (struct acpi_dmar_reserved_memory *)header;
4161 rmrru->base_address = rmrr->base_address;
4162 rmrru->end_address = rmrr->end_address;
Eric Auger0659b8d2017-01-19 20:57:53 +00004163
4164 length = rmrr->end_address - rmrr->base_address + 1;
4165 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4166 IOMMU_RESV_DIRECT);
4167 if (!rmrru->resv)
4168 goto free_rmrru;
4169
Jiang Liu2e455282014-02-19 14:07:36 +08004170 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4171 ((void *)rmrr) + rmrr->header.length,
4172 &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004173 if (rmrru->devices_cnt && rmrru->devices == NULL)
4174 goto free_all;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004175
Jiang Liu2e455282014-02-19 14:07:36 +08004176 list_add(&rmrru->list, &dmar_rmrr_units);
4177
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004178 return 0;
Eric Auger0659b8d2017-01-19 20:57:53 +00004179free_all:
4180 kfree(rmrru->resv);
4181free_rmrru:
4182 kfree(rmrru);
4183out:
4184 return -ENOMEM;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004185}
4186
Jiang Liu6b197242014-11-09 22:47:58 +08004187static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4188{
4189 struct dmar_atsr_unit *atsru;
4190 struct acpi_dmar_atsr *tmp;
4191
4192 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4193 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4194 if (atsr->segment != tmp->segment)
4195 continue;
4196 if (atsr->header.length != tmp->header.length)
4197 continue;
4198 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4199 return atsru;
4200 }
4201
4202 return NULL;
4203}
4204
4205int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004206{
4207 struct acpi_dmar_atsr *atsr;
4208 struct dmar_atsr_unit *atsru;
4209
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004210 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
Jiang Liu6b197242014-11-09 22:47:58 +08004211 return 0;
4212
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004213 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004214 atsru = dmar_find_atsr(atsr);
4215 if (atsru)
4216 return 0;
4217
4218 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004219 if (!atsru)
4220 return -ENOMEM;
4221
Jiang Liu6b197242014-11-09 22:47:58 +08004222 /*
4223 * If memory is allocated from slab by ACPI _DSM method, we need to
4224 * copy the memory content because the memory buffer will be freed
4225 * on return.
4226 */
4227 atsru->hdr = (void *)(atsru + 1);
4228 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004229 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004230 if (!atsru->include_all) {
4231 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4232 (void *)atsr + atsr->header.length,
4233 &atsru->devices_cnt);
4234 if (atsru->devices_cnt && atsru->devices == NULL) {
4235 kfree(atsru);
4236 return -ENOMEM;
4237 }
4238 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004239
Jiang Liu0e2426122014-02-19 14:07:34 +08004240 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004241
4242 return 0;
4243}
4244
Jiang Liu9bdc5312014-01-06 14:18:27 +08004245static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4246{
4247 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4248 kfree(atsru);
4249}
4250
Jiang Liu6b197242014-11-09 22:47:58 +08004251int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4252{
4253 struct acpi_dmar_atsr *atsr;
4254 struct dmar_atsr_unit *atsru;
4255
4256 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4257 atsru = dmar_find_atsr(atsr);
4258 if (atsru) {
4259 list_del_rcu(&atsru->list);
4260 synchronize_rcu();
4261 intel_iommu_free_atsr(atsru);
4262 }
4263
4264 return 0;
4265}
4266
4267int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4268{
4269 int i;
4270 struct device *dev;
4271 struct acpi_dmar_atsr *atsr;
4272 struct dmar_atsr_unit *atsru;
4273
4274 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4275 atsru = dmar_find_atsr(atsr);
4276 if (!atsru)
4277 return 0;
4278
Linus Torvalds194dc872016-07-27 20:03:31 -07004279 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08004280 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4281 i, dev)
4282 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07004283 }
Jiang Liu6b197242014-11-09 22:47:58 +08004284
4285 return 0;
4286}
4287
Jiang Liuffebeb42014-11-09 22:48:02 +08004288static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4289{
4290 int sp, ret = 0;
4291 struct intel_iommu *iommu = dmaru->iommu;
4292
4293 if (g_iommus[iommu->seq_id])
4294 return 0;
4295
4296 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004297 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004298 iommu->name);
4299 return -ENXIO;
4300 }
4301 if (!ecap_sc_support(iommu->ecap) &&
4302 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004303 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004304 iommu->name);
4305 return -ENXIO;
4306 }
4307 sp = domain_update_iommu_superpage(iommu) - 1;
4308 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004309 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004310 iommu->name);
4311 return -ENXIO;
4312 }
4313
4314 /*
4315 * Disable translation if already enabled prior to OS handover.
4316 */
4317 if (iommu->gcmd & DMA_GCMD_TE)
4318 iommu_disable_translation(iommu);
4319
4320 g_iommus[iommu->seq_id] = iommu;
4321 ret = iommu_init_domains(iommu);
4322 if (ret == 0)
4323 ret = iommu_alloc_root_entry(iommu);
4324 if (ret)
4325 goto out;
4326
David Woodhouse8a94ade2015-03-24 14:54:56 +00004327#ifdef CONFIG_INTEL_IOMMU_SVM
4328 if (pasid_enabled(iommu))
4329 intel_svm_alloc_pasid_tables(iommu);
4330#endif
4331
Jiang Liuffebeb42014-11-09 22:48:02 +08004332 if (dmaru->ignored) {
4333 /*
4334 * we always have to disable PMRs or DMA may fail on this device
4335 */
4336 if (force_on)
4337 iommu_disable_protect_mem_regions(iommu);
4338 return 0;
4339 }
4340
4341 intel_iommu_init_qi(iommu);
4342 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004343
4344#ifdef CONFIG_INTEL_IOMMU_SVM
4345 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4346 ret = intel_svm_enable_prq(iommu);
4347 if (ret)
4348 goto disable_iommu;
4349 }
4350#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004351 ret = dmar_set_interrupt(iommu);
4352 if (ret)
4353 goto disable_iommu;
4354
4355 iommu_set_root_entry(iommu);
4356 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4357 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4358 iommu_enable_translation(iommu);
4359
Jiang Liuffebeb42014-11-09 22:48:02 +08004360 iommu_disable_protect_mem_regions(iommu);
4361 return 0;
4362
4363disable_iommu:
4364 disable_dmar_iommu(iommu);
4365out:
4366 free_dmar_iommu(iommu);
4367 return ret;
4368}
4369
Jiang Liu6b197242014-11-09 22:47:58 +08004370int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4371{
Jiang Liuffebeb42014-11-09 22:48:02 +08004372 int ret = 0;
4373 struct intel_iommu *iommu = dmaru->iommu;
4374
4375 if (!intel_iommu_enabled)
4376 return 0;
4377 if (iommu == NULL)
4378 return -EINVAL;
4379
4380 if (insert) {
4381 ret = intel_iommu_add(dmaru);
4382 } else {
4383 disable_dmar_iommu(iommu);
4384 free_dmar_iommu(iommu);
4385 }
4386
4387 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004388}
4389
Jiang Liu9bdc5312014-01-06 14:18:27 +08004390static void intel_iommu_free_dmars(void)
4391{
4392 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4393 struct dmar_atsr_unit *atsru, *atsr_n;
4394
4395 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4396 list_del(&rmrru->list);
4397 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004398 kfree(rmrru->resv);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004399 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004400 }
4401
Jiang Liu9bdc5312014-01-06 14:18:27 +08004402 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4403 list_del(&atsru->list);
4404 intel_iommu_free_atsr(atsru);
4405 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004406}
4407
4408int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4409{
Jiang Liub683b232014-02-19 14:07:32 +08004410 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004411 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004412 struct pci_dev *bridge = NULL;
4413 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004414 struct acpi_dmar_atsr *atsr;
4415 struct dmar_atsr_unit *atsru;
4416
4417 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004418 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004419 bridge = bus->self;
David Woodhoused14053b32015-10-15 09:28:06 +01004420 /* If it's an integrated device, allow ATS */
4421 if (!bridge)
4422 return 1;
4423 /* Connected via non-PCIe: no ATS */
4424 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004425 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004426 return 0;
David Woodhoused14053b32015-10-15 09:28:06 +01004427 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004428 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004429 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004430 }
4431
Jiang Liu0e2426122014-02-19 14:07:34 +08004432 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004433 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4434 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4435 if (atsr->segment != pci_domain_nr(dev->bus))
4436 continue;
4437
Jiang Liub683b232014-02-19 14:07:32 +08004438 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004439 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004440 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004441
4442 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004443 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004444 }
Jiang Liub683b232014-02-19 14:07:32 +08004445 ret = 0;
4446out:
Jiang Liu0e2426122014-02-19 14:07:34 +08004447 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004448
Jiang Liub683b232014-02-19 14:07:32 +08004449 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004450}
4451
Jiang Liu59ce0512014-02-19 14:07:35 +08004452int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4453{
4454 int ret = 0;
4455 struct dmar_rmrr_unit *rmrru;
4456 struct dmar_atsr_unit *atsru;
4457 struct acpi_dmar_atsr *atsr;
4458 struct acpi_dmar_reserved_memory *rmrr;
4459
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004460 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
Jiang Liu59ce0512014-02-19 14:07:35 +08004461 return 0;
4462
4463 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4464 rmrr = container_of(rmrru->hdr,
4465 struct acpi_dmar_reserved_memory, header);
4466 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4467 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4468 ((void *)rmrr) + rmrr->header.length,
4469 rmrr->segment, rmrru->devices,
4470 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004471			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004472 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004473 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004474 dmar_remove_dev_scope(info, rmrr->segment,
4475 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004476 }
4477 }
4478
4479 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4480 if (atsru->include_all)
4481 continue;
4482
4483 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4484 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4485 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4486 (void *)atsr + atsr->header.length,
4487 atsr->segment, atsru->devices,
4488 atsru->devices_cnt);
4489 if (ret > 0)
4490 break;
4491			else if (ret < 0)
4492 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004493 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004494 if (dmar_remove_dev_scope(info, atsr->segment,
4495 atsru->devices, atsru->devices_cnt))
4496 break;
4497 }
4498 }
4499
4500 return 0;
4501}
4502
Fenghua Yu99dcade2009-11-11 07:23:06 -08004503/*
4504 * Here we only respond to the action of a device being unbound from its driver.
4505 *
4506 * A newly added device is not attached to its DMAR domain here yet. That happens
4507 * when the device is first mapped to an iova.
4508 */
4509static int device_notifier(struct notifier_block *nb,
4510 unsigned long action, void *data)
4511{
4512 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004513 struct dmar_domain *domain;
4514
David Woodhouse3d891942014-03-06 15:59:26 +00004515 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004516 return 0;
4517
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004518 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004519 return 0;
4520
David Woodhouse1525a292014-03-06 16:19:30 +00004521 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004522 if (!domain)
4523 return 0;
4524
Joerg Roedele6de0f82015-07-22 16:30:36 +02004525 dmar_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004526 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004527 domain_exit(domain);
Alex Williamsona97590e2011-03-04 14:52:16 -07004528
Fenghua Yu99dcade2009-11-11 07:23:06 -08004529 return 0;
4530}
4531
4532static struct notifier_block device_nb = {
4533 .notifier_call = device_notifier,
4534};
4535
Jiang Liu75f05562014-02-19 14:07:37 +08004536static int intel_iommu_memory_notifier(struct notifier_block *nb,
4537 unsigned long val, void *v)
4538{
4539 struct memory_notify *mhp = v;
4540 unsigned long long start, end;
4541 unsigned long start_vpfn, last_vpfn;
4542
4543 switch (val) {
4544 case MEM_GOING_ONLINE:
4545 start = mhp->start_pfn << PAGE_SHIFT;
4546 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4547 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004548 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004549 start, end);
4550 return NOTIFY_BAD;
4551 }
4552 break;
4553
4554 case MEM_OFFLINE:
4555 case MEM_CANCEL_ONLINE:
4556 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4557 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4558 while (start_vpfn <= last_vpfn) {
4559 struct iova *iova;
4560 struct dmar_drhd_unit *drhd;
4561 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004562 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004563
4564 iova = find_iova(&si_domain->iovad, start_vpfn);
4565 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004566				pr_debug("Failed to get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004567 start_vpfn);
4568 break;
4569 }
4570
4571 iova = split_and_remove_iova(&si_domain->iovad, iova,
4572 start_vpfn, last_vpfn);
4573 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004574 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004575 start_vpfn, last_vpfn);
4576 return NOTIFY_BAD;
4577 }
4578
David Woodhouseea8ea462014-03-05 17:09:32 +00004579 freelist = domain_unmap(si_domain, iova->pfn_lo,
4580 iova->pfn_hi);
4581
Jiang Liu75f05562014-02-19 14:07:37 +08004582 rcu_read_lock();
4583 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004584 iommu_flush_iotlb_psi(iommu, si_domain,
Jiang Liua156ef92014-07-11 14:19:36 +08004585 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004586 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004587 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004588 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004589
4590 start_vpfn = iova->pfn_hi + 1;
4591 free_iova_mem(iova);
4592 }
4593 break;
4594 }
4595
4596 return NOTIFY_OK;
4597}
4598
4599static struct notifier_block intel_iommu_memory_nb = {
4600 .notifier_call = intel_iommu_memory_notifier,
4601 .priority = 0
4602};
4603
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004604static void free_all_cpu_cached_iovas(unsigned int cpu)
4605{
4606 int i;
4607
4608 for (i = 0; i < g_num_of_iommus; i++) {
4609 struct intel_iommu *iommu = g_iommus[i];
4610 struct dmar_domain *domain;
Aaron Campbell0caa7612016-07-02 21:23:24 -03004611 int did;
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004612
4613 if (!iommu)
4614 continue;
4615
Jan Niehusmann3bd4f912016-06-06 14:20:11 +02004616 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
Aaron Campbell0caa7612016-07-02 21:23:24 -03004617 domain = get_iommu_domain(iommu, (u16)did);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004618
4619 if (!domain)
4620 continue;
4621 free_cpu_cached_iovas(cpu, &domain->iovad);
4622 }
4623 }
4624}
4625
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004626static int intel_iommu_cpu_dead(unsigned int cpu)
Omer Pelegaa473242016-04-20 11:33:02 +03004627{
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004628 free_all_cpu_cached_iovas(cpu);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004629 return 0;
Omer Pelegaa473242016-04-20 11:33:02 +03004630}
4631
Joerg Roedel161b28a2017-03-28 17:04:52 +02004632static void intel_disable_iommus(void)
4633{
4634 struct intel_iommu *iommu = NULL;
4635 struct dmar_drhd_unit *drhd;
4636
4637 for_each_iommu(iommu, drhd)
4638 iommu_disable_translation(iommu);
4639}
4640
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004641static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4642{
Joerg Roedel2926a2aa2017-08-14 17:19:26 +02004643 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4644
4645 return container_of(iommu_dev, struct intel_iommu, iommu);
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004646}
4647
Alex Williamsona5459cf2014-06-12 16:12:31 -06004648static ssize_t intel_iommu_show_version(struct device *dev,
4649 struct device_attribute *attr,
4650 char *buf)
4651{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004652 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004653 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4654 return sprintf(buf, "%d:%d\n",
4655 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4656}
4657static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4658
4659static ssize_t intel_iommu_show_address(struct device *dev,
4660 struct device_attribute *attr,
4661 char *buf)
4662{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004663 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004664 return sprintf(buf, "%llx\n", iommu->reg_phys);
4665}
4666static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4667
4668static ssize_t intel_iommu_show_cap(struct device *dev,
4669 struct device_attribute *attr,
4670 char *buf)
4671{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004672 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004673 return sprintf(buf, "%llx\n", iommu->cap);
4674}
4675static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4676
4677static ssize_t intel_iommu_show_ecap(struct device *dev,
4678 struct device_attribute *attr,
4679 char *buf)
4680{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004681 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004682 return sprintf(buf, "%llx\n", iommu->ecap);
4683}
4684static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4685
Alex Williamson2238c082015-07-14 15:24:53 -06004686static ssize_t intel_iommu_show_ndoms(struct device *dev,
4687 struct device_attribute *attr,
4688 char *buf)
4689{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004690 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004691 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4692}
4693static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4694
4695static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4696 struct device_attribute *attr,
4697 char *buf)
4698{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004699 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004700 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4701 cap_ndoms(iommu->cap)));
4702}
4703static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4704
Alex Williamsona5459cf2014-06-12 16:12:31 -06004705static struct attribute *intel_iommu_attrs[] = {
4706 &dev_attr_version.attr,
4707 &dev_attr_address.attr,
4708 &dev_attr_cap.attr,
4709 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004710 &dev_attr_domains_supported.attr,
4711 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004712 NULL,
4713};
4714
4715static struct attribute_group intel_iommu_group = {
4716 .name = "intel-iommu",
4717 .attrs = intel_iommu_attrs,
4718};
4719
4720const struct attribute_group *intel_iommu_groups[] = {
4721 &intel_iommu_group,
4722 NULL,
4723};
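/*
 * These groups are registered from intel_iommu_init() via
 * iommu_device_sysfs_add(), so the attributes typically appear under paths
 * such as (assuming the usual "dmarN" unit names):
 *
 *	/sys/class/iommu/dmar0/intel-iommu/version
 *	/sys/class/iommu/dmar0/intel-iommu/address
 *	/sys/class/iommu/dmar0/intel-iommu/cap
 *	/sys/class/iommu/dmar0/intel-iommu/ecap
 *	/sys/class/iommu/dmar0/intel-iommu/domains_supported
 *	/sys/class/iommu/dmar0/intel-iommu/domains_used
 */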
4724
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004725int __init intel_iommu_init(void)
4726{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004727 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004728 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004729 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004730
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004731 /* VT-d is required for a TXT/tboot launch, so enforce that */
4732 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004733
Jiang Liu3a5670e2014-02-19 14:07:33 +08004734 if (iommu_init_mempool()) {
4735 if (force_on)
4736 panic("tboot: Failed to initialize iommu memory\n");
4737 return -ENOMEM;
4738 }
4739
4740 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004741 if (dmar_table_init()) {
4742 if (force_on)
4743 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004744 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004745 }
4746
Suresh Siddhac2c72862011-08-23 17:05:19 -07004747 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004748 if (force_on)
4749 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004750 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004751 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004752
Joerg Roedelec154bf2017-10-06 15:00:53 +02004753 up_write(&dmar_global_lock);
4754
4755 /*
4756 * The bus notifier takes the dmar_global_lock, so lockdep will
4757 * complain later when we register it under the lock.
4758 */
4759 dmar_register_bus_notifier();
4760
4761 down_write(&dmar_global_lock);
4762
Joerg Roedel161b28a2017-03-28 17:04:52 +02004763 if (no_iommu || dmar_disabled) {
4764 /*
Shaohua Libfd20f12017-04-26 09:18:35 -07004765	 * We exit the function here to ensure that the IOMMU's remapping and
4766	 * mempool aren't set up, which means that the IOMMU's PMRs
4767	 * won't be disabled via the call to init_dmars(). So disable
4768	 * them explicitly here. The PMRs were set up by tboot prior to
4769	 * calling SENTER, but the kernel is expected to reset/tear
4770	 * down the PMRs.
4771 */
4772 if (intel_iommu_tboot_noforce) {
4773 for_each_iommu(iommu, drhd)
4774 iommu_disable_protect_mem_regions(iommu);
4775 }
4776
4777 /*
Joerg Roedel161b28a2017-03-28 17:04:52 +02004778 * Make sure the IOMMUs are switched off, even when we
4779 * boot into a kexec kernel and the previous kernel left
4780 * them enabled
4781 */
4782 intel_disable_iommus();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004783 goto out_free_dmar;
Joerg Roedel161b28a2017-03-28 17:04:52 +02004784 }
Suresh Siddha2ae21012008-07-10 11:16:43 -07004785
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004786 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004787 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004788
4789 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004790 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004791
Joseph Cihula51a63e62011-03-21 11:04:24 -07004792 if (dmar_init_reserved_ranges()) {
4793 if (force_on)
4794 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004795 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004796 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004797
4798 init_no_remapping_devices();
4799
Joseph Cihulab7792602011-05-03 00:08:37 -07004800 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004801 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004802 if (force_on)
4803 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004804 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004805 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004806 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004807 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004808 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004809
Christoph Hellwig4fac8072017-12-24 13:57:08 +01004810#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004811 swiotlb = 0;
4812#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004813 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004814
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004815 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004816
Joerg Roedel39ab9552017-02-01 16:56:46 +01004817 for_each_active_iommu(iommu, drhd) {
4818 iommu_device_sysfs_add(&iommu->iommu, NULL,
4819 intel_iommu_groups,
4820 "%s", iommu->name);
4821 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4822 iommu_device_register(&iommu->iommu);
4823 }
Alex Williamsona5459cf2014-06-12 16:12:31 -06004824
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004825 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004826 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004827 if (si_domain && !hw_pass_through)
4828 register_memory_notifier(&intel_iommu_memory_nb);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004829 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4830 intel_iommu_cpu_dead);
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004831 intel_iommu_enabled = 1;
4832
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004833 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004834
4835out_free_reserved_range:
4836 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004837out_free_dmar:
4838 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004839 up_write(&dmar_global_lock);
4840 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004841 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004842}
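/*
 * Usage note: the early exit in intel_iommu_init() above corresponds to the
 * usual boot parameters; "intel_iommu=off" sets dmar_disabled and "iommu=off"
 * sets no_iommu, in which case only the PMR disabling (when booted with
 * "intel_iommu=tboot_noforce") and intel_disable_iommus() are done before
 * bailing out.
 */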
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004843
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004844static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
Alex Williamson579305f2014-07-03 09:51:43 -06004845{
4846 struct intel_iommu *iommu = opaque;
4847
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004848 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06004849 return 0;
4850}
4851
4852/*
4853 * NB - intel-iommu lacks any sort of reference counting for the users of
4854 * dependent devices. If multiple endpoints have intersecting dependent
4855 * devices, unbinding the driver from any one of them will possibly leave
4856 * the others unable to operate.
4857 */
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004858static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004859{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004860 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004861 return;
4862
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004863 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004864}
4865
Joerg Roedel127c7612015-07-23 17:44:46 +02004866static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004867{
Weidong Hanc7151a82008-12-08 22:51:37 +08004868 struct intel_iommu *iommu;
4869 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08004870
Joerg Roedel55d94042015-07-22 16:50:40 +02004871 assert_spin_locked(&device_domain_lock);
4872
Joerg Roedelb608ac32015-07-21 18:19:08 +02004873 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08004874 return;
4875
Joerg Roedel127c7612015-07-23 17:44:46 +02004876 iommu = info->iommu;
4877
4878 if (info->dev) {
4879 iommu_disable_dev_iotlb(info);
4880 domain_context_clear(iommu, info->dev);
4881 }
4882
Joerg Roedelb608ac32015-07-21 18:19:08 +02004883 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07004884
Joerg Roedeld160aca2015-07-22 11:52:53 +02004885 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004886 domain_detach_iommu(info->domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004887 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004888
4889 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004890}
4891
Joerg Roedel55d94042015-07-22 16:50:40 +02004892static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4893 struct device *dev)
4894{
Joerg Roedel127c7612015-07-23 17:44:46 +02004895 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02004896 unsigned long flags;
4897
Weidong Hanc7151a82008-12-08 22:51:37 +08004898 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004899 info = dev->archdata.iommu;
4900 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004901 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004902}
4903
4904static int md_domain_init(struct dmar_domain *domain, int guest_width)
4905{
4906 int adjust_width;
4907
Zhen Leiaa3ac942017-09-21 16:52:45 +01004908 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004909 domain_reserve_special_ranges(domain);
4910
4911 /* calculate AGAW */
4912 domain->gaw = guest_width;
4913 adjust_width = guestwidth_to_adjustwidth(guest_width);
4914 domain->agaw = width_to_agaw(adjust_width);
4915
Weidong Han5e98c4b2008-12-08 23:03:27 +08004916 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004917 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004918 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004919 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004920
4921 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004922 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004923 if (!domain->pgd)
4924 return -ENOMEM;
4925 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4926 return 0;
4927}
4928
Joerg Roedel00a77de2015-03-26 13:43:08 +01004929static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004930{
Joerg Roedel5d450802008-12-03 14:52:32 +01004931 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004932 struct iommu_domain *domain;
4933
4934 if (type != IOMMU_DOMAIN_UNMANAGED)
4935 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004936
Jiang Liuab8dfe22014-07-11 14:19:27 +08004937 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004938 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004939 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004940 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004941 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004942 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004943 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004944 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004945 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004946 }
Allen Kay8140a952011-10-14 12:32:17 -07004947 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004948
Joerg Roedel00a77de2015-03-26 13:43:08 +01004949 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004950 domain->geometry.aperture_start = 0;
4951 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4952 domain->geometry.force_aperture = true;
4953
Joerg Roedel00a77de2015-03-26 13:43:08 +01004954 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004955}
Kay, Allen M38717942008-09-09 18:37:29 +03004956
Joerg Roedel00a77de2015-03-26 13:43:08 +01004957static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004958{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004959 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004960}
Kay, Allen M38717942008-09-09 18:37:29 +03004961
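/*
 * Illustrative sketch (hypothetical caller, not part of this file): these
 * intel_iommu_* callbacks back the generic IOMMU API, which a consumer such
 * as VFIO exercises roughly like this:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, &pdev->dev))
 *		goto out_free;
 *	iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
 *	... let the device DMA to/from the mapped range ...
 *	iommu_unmap(domain, iova, size);
 *	iommu_detach_device(domain, &pdev->dev);
 * out_free:
 *	iommu_domain_free(domain);
 */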
static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct intel_iommu *iommu;
        int addr_width;
        u8 bus, devfn;

        if (device_is_rmrr_locked(dev)) {
                dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
                return -EPERM;
        }

        /* normally dev is not mapped */
        if (unlikely(domain_context_mapped(dev))) {
                struct dmar_domain *old_domain;

                old_domain = find_domain(dev);
                if (old_domain) {
                        rcu_read_lock();
                        dmar_remove_one_dev_info(old_domain, dev);
                        rcu_read_unlock();

                        if (!domain_type_is_vm_or_si(old_domain) &&
                            list_empty(&old_domain->devices))
                                domain_exit(old_domain);
                }
        }

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
                addr_width = cap_mgaw(iommu->cap);

        if (dmar_domain->max_addr > (1LL << addr_width)) {
                pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
        }
        dmar_domain->gaw = addr_width;

        /*
         * Knock out extra levels of page tables if necessary
         */
        while (iommu->agaw < dmar_domain->agaw) {
                struct dma_pte *pte;

                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
                        free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }

        return domain_add_dev_info(dmar_domain, dev);
}

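/*
 * Worked example of the agaw adjustment above (a sketch, not a hardware
 * requirement): if the domain was built with a 4-level page table (48-bit
 * width) but this IOMMU can only walk three levels (39-bit width), the loop
 * frees the unused top-level page and promotes the table that its first
 * entry points to, so the unit is handed a structure it can actually walk.
 * Mappings above the clamped width were already rejected by the max_addr
 * check before this point.
 */
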
static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           size_t size, int iommu_prot)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        u64 max_addr;
        int prot = 0;
        int ret;

        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
                prot |= DMA_PTE_WRITE;
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;

        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                u64 end;

                /* check if minimum agaw is sufficient for mapped address */
                end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
                        pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
                               __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
                }
                dmar_domain->max_addr = max_addr;
        }
        /* Round up size to next multiple of PAGE_SIZE, if it and
           the low bits of hpa would take us onto the next page */
        size = aligned_nrpages(hpa, size);
        ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                 hpa >> VTD_PAGE_SHIFT, size, prot);
        return ret;
}

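/*
 * Sketch of the rounding above: with 4KiB VT-d pages, hpa == 0x1ff0 and
 * size == 0x20 straddle a page boundary, so aligned_nrpages() reports two
 * pages and both get mapped; the IOVA and HPA themselves are then truncated
 * to page frames before domain_pfn_mapping() installs the PTEs.
 */
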
static size_t intel_iommu_unmap(struct iommu_domain *domain,
                                unsigned long iova, size_t size)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct page *freelist = NULL;
        struct intel_iommu *iommu;
        unsigned long start_pfn, last_pfn;
        unsigned int npages;
        int iommu_id, level = 0;

        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
        BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);

        start_pfn = iova >> VTD_PAGE_SHIFT;
        last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

        freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

        npages = last_pfn - start_pfn + 1;

        for_each_domain_iommu(iommu_id, dmar_domain) {
                iommu = g_iommus[iommu_id];

                iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
                                      start_pfn, npages, !freelist, 0);
        }

        dma_free_pagelist(freelist);

        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;

        return size;
}

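/*
 * Example of the size adjustment above: if the IOVA is covered by a 2MiB
 * superpage PTE (level 2), a request to unmap a single 4KiB page is rounded
 * up and the whole 2MiB mapping is torn down; the rounded size is returned
 * so the generic iommu_unmap() loop can account for what was actually
 * removed.
 */
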
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                            dma_addr_t iova)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct dma_pte *pte;
        int level = 0;
        u64 phys = 0;

        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
        if (pte)
                phys = dma_pte_addr(pte);

        return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
        if (cap == IOMMU_CAP_CACHE_COHERENCY)
                return domain_update_iommu_snooping(NULL) == 1;
        if (cap == IOMMU_CAP_INTR_REMAP)
                return irq_remapping_enabled == 1;

        return false;
}

static int intel_iommu_add_device(struct device *dev)
{
        struct intel_iommu *iommu;
        struct iommu_group *group;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        iommu_device_link(&iommu->iommu, dev);

        group = iommu_group_get_for_dev(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return;

        iommu_group_remove_device(dev);

        iommu_device_unlink(&iommu->iommu, dev);
}

static void intel_iommu_get_resv_regions(struct device *device,
                                         struct list_head *head)
{
        struct iommu_resv_region *reg;
        struct dmar_rmrr_unit *rmrr;
        struct device *i_dev;
        int i;

        rcu_read_lock();
        for_each_rmrr_units(rmrr) {
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
                                          i, i_dev) {
                        if (i_dev != device)
                                continue;

                        list_add_tail(&rmrr->resv->list, head);
                }
        }
        rcu_read_unlock();

        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
                                      0, IOMMU_RESV_MSI);
        if (!reg)
                return;
        list_add_tail(&reg->list, head);
}

static void intel_iommu_put_resv_regions(struct device *dev,
                                         struct list_head *head)
{
        struct iommu_resv_region *entry, *next;

        list_for_each_entry_safe(entry, next, head, list) {
                /*
                 * The RMRR entries on this list are owned by their
                 * dmar_rmrr_unit and must survive; only the MSI region
                 * allocated in intel_iommu_get_resv_regions() is ours to
                 * free, so match on the type it was allocated with.
                 */
                if (entry->type == IOMMU_RESV_MSI)
                        kfree(entry);
        }
}

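/*
 * Illustrative consumer (a sketch, not code from this file): the IOMMU core
 * and users such as VFIO walk these regions through the generic helpers,
 * roughly:
 *
 *      LIST_HEAD(resv_regions);
 *      struct iommu_resv_region *region;
 *
 *      iommu_get_resv_regions(dev, &resv_regions);
 *      list_for_each_entry(region, &resv_regions, list)
 *              reserve_iova_range(region->start, region->length);
 *      iommu_put_resv_regions(dev, &resv_regions);
 *
 * reserve_iova_range() is a stand-in for whatever the caller does with each
 * window; iommu_get/put_resv_regions() are the core entry points that
 * dispatch to the two callbacks above.
 */
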
#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
{
        /*
         * Convert ecap_pss to extended context entry pts encoding, also
         * respect the soft pasid_max value set by the iommu.
         * - number of PASID bits = ecap_pss + 1
         * - number of PASID table entries = 2^(pts + 5)
         * Therefore, pts = ecap_pss - 4
         * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
         */
        if (ecap_pss(iommu->ecap) < 5)
                return 0;

        /* pasid_max is encoded as the actual number of entries, not the bits */
        return find_first_bit((unsigned long *)&iommu->pasid_max,
                              MAX_NR_PASID_BITS) - 5;
}

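/*
 * Worked example of the encoding above (sketch): with a soft limit of
 * iommu->pasid_max = 0x4000 (16384 entries, a power of two),
 * find_first_bit() returns 14, so pts = 14 - 5 = 9 and the hardware sees a
 * table of 2^(9 + 5) = 16384 entries, matching the soft limit.
 */
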
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
        struct device_domain_info *info;
        struct context_entry *context;
        struct dmar_domain *domain;
        unsigned long flags;
        u64 ctx_lo;
        int ret;

        domain = get_valid_domain_for_dev(sdev->dev);
        if (!domain)
                return -EINVAL;

        spin_lock_irqsave(&device_domain_lock, flags);
        spin_lock(&iommu->lock);

        ret = -EINVAL;
        info = sdev->dev->archdata.iommu;
        if (!info || !info->pasid_supported)
                goto out;

        context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
        if (WARN_ON(!context))
                goto out;

        ctx_lo = context[0].lo;

        sdev->did = domain->iommu_did[iommu->seq_id];
        sdev->sid = PCI_DEVID(info->bus, info->devfn);

        if (!(ctx_lo & CONTEXT_PASIDE)) {
                if (iommu->pasid_state_table)
                        context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
                context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
                        intel_iommu_get_pts(iommu);

                wmb();
                /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
                 * extended to permit requests-with-PASID if the PASIDE bit
                 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
                 * however, the PASIDE bit is ignored and requests-with-PASID
                 * are unconditionally blocked. Which makes less sense.
                 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
                 * "guest mode" translation types depending on whether ATS
                 * is available or not. Annoyingly, we can't use the new
                 * modes *unless* PASIDE is set. */
                if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
                        ctx_lo &= ~CONTEXT_TT_MASK;
                        if (info->ats_supported)
                                ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
                        else
                                ctx_lo |= CONTEXT_TT_PT_PASID << 2;
                }
                ctx_lo |= CONTEXT_PASIDE;
                if (iommu->pasid_state_table)
                        ctx_lo |= CONTEXT_DINVE;
                if (info->pri_supported)
                        ctx_lo |= CONTEXT_PRS;
                context[0].lo = ctx_lo;
                wmb();
                iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
        }

        /* Enable PASID support in the device, if it wasn't already */
        if (!info->pasid_enabled)
                iommu_enable_dev_iotlb(info);

        if (info->ats_enabled) {
                sdev->dev_iotlb = 1;
                sdev->qdep = info->ats_qdep;
                if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
                        sdev->qdep = 0;
        }
        ret = 0;

 out:
        spin_unlock(&iommu->lock);
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return ret;
}

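/*
 * Expected call path (a sketch; the caller lives in intel-svm.c, not here):
 * the SVM bind code looks up the unit with intel_svm_device_to_iommu()
 * below and then calls intel_iommu_enable_pasid() while setting up a PASID
 * binding, so the context entry advertises PASID support before the device
 * can issue any request-with-PASID.
 */
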
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        if (iommu_dummy(dev)) {
                dev_warn(dev,
                         "No IOMMU translation for device; cannot enable SVM\n");
                return NULL;
        }

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu) {
                dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
                return NULL;
        }

        if (!iommu->pasid_table) {
                dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
                return NULL;
        }

        return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
        .domain_free            = intel_iommu_domain_free,
        .attach_dev             = intel_iommu_attach_device,
        .detach_dev             = intel_iommu_detach_device,
        .map                    = intel_iommu_map,
        .unmap                  = intel_iommu_unmap,
        .map_sg                 = default_iommu_map_sg,
        .iova_to_phys           = intel_iommu_iova_to_phys,
        .add_device             = intel_iommu_add_device,
        .remove_device          = intel_iommu_remove_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = intel_iommu_put_resv_regions,
        .device_group           = pci_device_group,
        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
};

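/*
 * This table is what the generic IOMMU layer dispatches through.  It is
 * registered for PCI devices from the initialisation path elsewhere in this
 * file, presumably via bus_set_iommu(&pci_bus_type, &intel_iommu_ops).
 */
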
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
        /* G4x/GM45 integrated gfx dmar support is totally busted. */
        pr_info("Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it. Same seems to hold for the desktop versions.
         */
        pr_info("Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK    (0xf << 8)
#define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
#define GGC_MEMORY_SIZE_1M      (0x1 << 8)
#define GGC_MEMORY_SIZE_2M      (0x3 << 8)
#define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
        unsigned short ggc;

        if (pci_read_config_word(dev, GGC, &ggc))
                return;

        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
                pr_info("Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
        struct pci_dev *pdev;
        uint32_t vtisochctrl;

        /* If there's no Azalia in the system anyway, forget it. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
        pci_dev_put(pdev);

        /* System Management Registers. Might be hidden, in which case
           we can't do the sanity check. But that's OK, because the
           known-broken BIOSes _don't_ actually hide it, so far. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
        if (!pdev)
                return;

        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
        }

        pci_dev_put(pdev);

        /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
        if (vtisochctrl & 1)
                return;

        /* Drop all bits other than the number of TLB entries */
        vtisochctrl &= 0x1c;

        /* If we have the recommended number of TLB entries (16), fine. */
        if (vtisochctrl == 0x10)
                return;

        /* Zero TLB entries? You get to ride the short bus to school. */
        if (!vtisochctrl) {
                WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }

        pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
                vtisochctrl);
}