/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
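
/*
 * Worked example of the macros above (assuming the usual VTD_PAGE_SHIFT of
 * 12): with gaw == 48, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1.  On a
 * 64-bit kernel DOMAIN_MAX_PFN(48) keeps that value; on a 32-bit kernel it
 * is clamped to (unsigned long)-1 == 0xffffffff, so a PFN always fits in
 * an unsigned long.
 */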

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
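
/*
 * Worked example: ~0xFFFUL clears bits 0-11 and sets every higher bit, so
 * the IOMMU core sees bit 12 (4KiB), bit 13 (8KiB), bit 14 (16KiB) and so
 * on as supported sizes, i.e. every power-of-two multiple of 4KiB.
 */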

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
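
/*
 * Worked example for the conversions above: an adjusted guest address
 * width (agaw) of 2 means a 4-level page table (agaw_to_level(2) == 4)
 * covering 30 + 2 * 9 == 48 bits of address space, and width_to_agaw(48)
 * == DIV_ROUND_UP(18, 9) == 2 maps back again.
 */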

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
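
/*
 * Worked example: at level 2, level_to_offset_bits() == 9, so
 * pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff picks the 9-bit index
 * into that level's table, level_size(2) == 512 pfns (2MiB of 4KiB pages)
 * and align_to_level(pfn, 2) rounds pfn up to a 512-pfn boundary.
 */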

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
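
/*
 * Note on the conversions above: DMA pfns are counted in fixed 4KiB VT-d
 * pages (VTD_PAGE_SHIFT == 12) while MM pfns are counted in PAGE_SIZE
 * units.  With 4KiB MM pages the two shifts are zero and the conversions
 * are identity; the shifts only matter if PAGE_SIZE were larger than the
 * VT-d page size.
 */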

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
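
/*
 * Sizing note: each root_entry is 16 bytes, so a single 4KiB root table
 * holds ROOT_ENTRY_NR == 256 entries, one per PCI bus number.
 */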

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
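
/*
 * Illustrative example of the layout above: a present entry with fault
 * processing enabled, translation type 0, a 4-level page table (address
 * width value 2, i.e. 48 bits) rooted at physical address ASR and domain
 * id 5 would hold lo == ASR | 1 and hi == (5 << 8) | 2.
 */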

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};
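
/*
 * Illustrative example: a leaf PTE mapping a readable and writable 4KiB
 * page at host physical address 0x12345000 would hold
 * val == 0x12345000 | DMA_PTE_READ | DMA_PTE_WRITE == 0x12345003.
 */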

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

struct deferred_flush_entry {
	unsigned long iova_pfn;
	unsigned long nrpages;
	struct dmar_domain *domain;
	struct page *freelist;
};

#define HIGH_WATER_MARK 250
struct deferred_flush_table {
	int next;
	struct deferred_flush_entry entries[HIGH_WATER_MARK];
};

struct deferred_flush_data {
	spinlock_t lock;
	int timer_on;
	struct timer_list timer;
	long size;
	struct deferred_flush_table *tables;
};

DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support — normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu) (ecs_enabled(iommu) &&			\
			      (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
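
/*
 * In short: ecs_enabled() is true only when ecs_off was not given on the
 * command line, the hardware advertises ECS, and either the broken (old
 * bit 28) PASID capability is clear or the 'pasid28' workaround was
 * explicitly requested.
 */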

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain' to private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}
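
/*
 * The lookup above treats the 16-bit domain id as a two-level index:
 * iommu->domains is an array of lazily allocated blocks of 256
 * dmar_domain pointers, so domain id 0x1234 ends up at
 * iommu->domains[0x12][0x34].
 */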

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
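
/*
 * Addressing note for iommu_context_addr(): in legacy mode root->lo points
 * to one context table with a 16-byte entry for each of the 256 devfns on
 * the bus.  With ECS, extended context entries are twice as large, so the
 * table behind root->lo covers devfn 0-127 and the one behind root->hi
 * covers devfn 128-255; 'devfn *= 2' turns the devfn into an index in
 * units of plain 16-byte context_entry slots.
 */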

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = to_pci_dev(dev);
		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev->is_virtfn)
					goto got_pdev;

				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* clear last level (leaf) ptes and free page table pages. */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

1268/* We can't just free the pages because the IOMMU may still be walking
1269 the page tables, and may have cached the intermediate levels. The
1270 pages can only be freed after the IOTLB flush has been done. */
Joerg Roedelb6904202015-08-13 11:32:18 +02001271static struct page *domain_unmap(struct dmar_domain *domain,
1272 unsigned long start_pfn,
1273 unsigned long last_pfn)
David Woodhouseea8ea462014-03-05 17:09:32 +00001274{
David Woodhouseea8ea462014-03-05 17:09:32 +00001275 struct page *freelist = NULL;
1276
Jiang Liu162d1b12014-07-11 14:19:35 +08001277 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1278 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001279 BUG_ON(start_pfn > last_pfn);
1280
1281 /* we don't need a lock here; nobody else touches the iova range */
1282 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1283 domain->pgd, 0, start_pfn, last_pfn, NULL);
1284
1285 /* free pgd */
1286 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1287 struct page *pgd_page = virt_to_page(domain->pgd);
1288 pgd_page->freelist = freelist;
1289 freelist = pgd_page;
1290
1291 domain->pgd = NULL;
1292 }
1293
1294 return freelist;
1295}
1296
Joerg Roedelb6904202015-08-13 11:32:18 +02001297static void dma_free_pagelist(struct page *freelist)
David Woodhouseea8ea462014-03-05 17:09:32 +00001298{
1299 struct page *pg;
1300
1301 while ((pg = freelist)) {
1302 freelist = pg->freelist;
1303 free_pgtable_page(page_address(pg));
1304 }
1305}
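/*
 * Sketch of how the two helpers above are meant to be used together
 * (see e.g. domain_exit() below):
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	... flush the IOTLB for the affected range on each IOMMU ...
 *	dma_free_pagelist(freelist);
 *
 * The page-table pages are only handed back to the allocator once the
 * flush guarantees the hardware page-walk can no longer reach them.
 */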
1306
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001307/* iommu handling */
1308static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1309{
1310 struct root_entry *root;
1311 unsigned long flags;
1312
Suresh Siddha4c923d42009-10-02 11:01:24 -07001313 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001314 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001315 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001316 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001317 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001318 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001319
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001320 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001321
1322 spin_lock_irqsave(&iommu->lock, flags);
1323 iommu->root_entry = root;
1324 spin_unlock_irqrestore(&iommu->lock, flags);
1325
1326 return 0;
1327}
1328
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001329static void iommu_set_root_entry(struct intel_iommu *iommu)
1330{
David Woodhouse03ecc322015-02-13 14:35:21 +00001331 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001332 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001333 unsigned long flag;
1334
David Woodhouse03ecc322015-02-13 14:35:21 +00001335 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001336 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001337 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001338
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001339 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001340 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001341
David Woodhousec416daa2009-05-10 20:30:58 +01001342 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001343
1344 /* Make sure hardware completes it */
1345 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001346 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001347
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001348 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349}
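/*
 * The function above follows the register-programming pattern used
 * throughout this file: take register_lock, write the new root-table
 * address and issue the command (DMA_GCMD_SRTP here), then spin with
 * IOMMU_WAIT_OP() until the corresponding status bit (DMA_GSTS_RTPS)
 * shows the hardware has latched it.
 */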
1350
1351static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1352{
1353 u32 val;
1354 unsigned long flag;
1355
David Woodhouse9af88142009-02-13 23:18:03 +00001356 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001357 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001358
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001359 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001360 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001361
1362 /* Make sure hardware completes it */
1363 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001364 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001365
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001366 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001367}
1368
1369/* return value determines if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001370static void __iommu_flush_context(struct intel_iommu *iommu,
1371 u16 did, u16 source_id, u8 function_mask,
1372 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001373{
1374 u64 val = 0;
1375 unsigned long flag;
1376
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001377 switch (type) {
1378 case DMA_CCMD_GLOBAL_INVL:
1379 val = DMA_CCMD_GLOBAL_INVL;
1380 break;
1381 case DMA_CCMD_DOMAIN_INVL:
1382 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1383 break;
1384 case DMA_CCMD_DEVICE_INVL:
1385 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1386 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1387 break;
1388 default:
1389 BUG();
1390 }
1391 val |= DMA_CCMD_ICC;
1392
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001393 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001394 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1395
1396 /* Make sure hardware completes it */
1397 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1398 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1399
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001400 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001401}
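/*
 * For reference, the three context-cache invalidation granularities
 * handled above, from coarsest to finest:
 *
 *	(iommu, 0,   0,   0,  DMA_CCMD_GLOBAL_INVL)	- whole cache
 *	(iommu, did, 0,   0,  DMA_CCMD_DOMAIN_INVL)	- one domain-id
 *	(iommu, did, sid, fm, DMA_CCMD_DEVICE_INVL)	- one source-id
 *
 * where 'sid' is (bus << 8 | devfn).  The device-scoped callers later in
 * this file pass DMA_CCMD_MASK_NOBIT as the function mask.
 */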
1402
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001403/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001404static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1405 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001406{
1407 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1408 u64 val = 0, val_iva = 0;
1409 unsigned long flag;
1410
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411 switch (type) {
1412 case DMA_TLB_GLOBAL_FLUSH:
1413 /* global flush doesn't need to set IVA_REG */
1414 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1415 break;
1416 case DMA_TLB_DSI_FLUSH:
1417 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1418 break;
1419 case DMA_TLB_PSI_FLUSH:
1420 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001421 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001422 val_iva = size_order | addr;
1423 break;
1424 default:
1425 BUG();
1426 }
1427 /* Note: set drain read/write */
1428#if 0
1429 /*
1430 * This is probably meant to be extra secure.  It looks like we can
1431 * ignore it without any impact.
1432 */
1433 if (cap_read_drain(iommu->cap))
1434 val |= DMA_TLB_READ_DRAIN;
1435#endif
1436 if (cap_write_drain(iommu->cap))
1437 val |= DMA_TLB_WRITE_DRAIN;
1438
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001439 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440 /* Note: Only uses first TLB reg currently */
1441 if (val_iva)
1442 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1443 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1444
1445 /* Make sure hardware completes it */
1446 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1447 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1448
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001449 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001450
1451 /* check IOTLB invalidation granularity */
1452 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001453 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001454 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001455 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001456 (unsigned long long)DMA_TLB_IIRG(type),
1457 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001458}
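/*
 * For a page-selective (PSI) flush the 'val_iva' word built above packs
 * three things: the page-aligned target address in the high bits, the IH
 * invalidation hint in bit 6 (iommu_flush_iotlb_psi() below ORs 1 << 6
 * into 'addr' for that), and the size order in the low bits, meaning the
 * request covers 2^size_order pages.
 */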
1459
David Woodhouse64ae8922014-03-09 12:52:30 -07001460static struct device_domain_info *
1461iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1462 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001463{
Yu Zhao93a23a72009-05-18 13:51:37 +08001464 struct device_domain_info *info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001465
Joerg Roedel55d94042015-07-22 16:50:40 +02001466 assert_spin_locked(&device_domain_lock);
1467
Yu Zhao93a23a72009-05-18 13:51:37 +08001468 if (!iommu->qi)
1469 return NULL;
1470
Yu Zhao93a23a72009-05-18 13:51:37 +08001471 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001472 if (info->iommu == iommu && info->bus == bus &&
1473 info->devfn == devfn) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001474 if (info->ats_supported && info->dev)
1475 return info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001476 break;
1477 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001478
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001479 return NULL;
Yu Zhao93a23a72009-05-18 13:51:37 +08001480}
1481
Omer Peleg0824c592016-04-20 19:03:35 +03001482static void domain_update_iotlb(struct dmar_domain *domain)
1483{
1484 struct device_domain_info *info;
1485 bool has_iotlb_device = false;
1486
1487 assert_spin_locked(&device_domain_lock);
1488
1489 list_for_each_entry(info, &domain->devices, link) {
1490 struct pci_dev *pdev;
1491
1492 if (!info->dev || !dev_is_pci(info->dev))
1493 continue;
1494
1495 pdev = to_pci_dev(info->dev);
1496 if (pdev->ats_enabled) {
1497 has_iotlb_device = true;
1498 break;
1499 }
1500 }
1501
1502 domain->has_iotlb_device = has_iotlb_device;
1503}
1504
Yu Zhao93a23a72009-05-18 13:51:37 +08001505static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1506{
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001507 struct pci_dev *pdev;
1508
Omer Peleg0824c592016-04-20 19:03:35 +03001509 assert_spin_locked(&device_domain_lock);
1510
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001511 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001512 return;
1513
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001514 pdev = to_pci_dev(info->dev);
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001515
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001516#ifdef CONFIG_INTEL_IOMMU_SVM
1517 /* The PCIe spec, in its wisdom, declares that the behaviour of
1518 the device if you enable PASID support after ATS support is
1519 undefined. So always enable PASID support on devices which
1520 have it, even if we can't yet know if we're ever going to
1521 use it. */
1522 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1523 info->pasid_enabled = 1;
1524
1525 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1526 info->pri_enabled = 1;
1527#endif
1528 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1529 info->ats_enabled = 1;
Omer Peleg0824c592016-04-20 19:03:35 +03001530 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001531 info->ats_qdep = pci_ats_queue_depth(pdev);
1532 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001533}
1534
1535static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1536{
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001537 struct pci_dev *pdev;
1538
Omer Peleg0824c592016-04-20 19:03:35 +03001539 assert_spin_locked(&device_domain_lock);
1540
Jeremy McNicollda972fb2016-01-14 21:33:06 -08001541 if (!dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001542 return;
1543
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001544 pdev = to_pci_dev(info->dev);
1545
1546 if (info->ats_enabled) {
1547 pci_disable_ats(pdev);
1548 info->ats_enabled = 0;
Omer Peleg0824c592016-04-20 19:03:35 +03001549 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001550 }
1551#ifdef CONFIG_INTEL_IOMMU_SVM
1552 if (info->pri_enabled) {
1553 pci_disable_pri(pdev);
1554 info->pri_enabled = 0;
1555 }
1556 if (info->pasid_enabled) {
1557 pci_disable_pasid(pdev);
1558 info->pasid_enabled = 0;
1559 }
1560#endif
Yu Zhao93a23a72009-05-18 13:51:37 +08001561}
1562
1563static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1564 u64 addr, unsigned mask)
1565{
1566 u16 sid, qdep;
1567 unsigned long flags;
1568 struct device_domain_info *info;
1569
Omer Peleg0824c592016-04-20 19:03:35 +03001570 if (!domain->has_iotlb_device)
1571 return;
1572
Yu Zhao93a23a72009-05-18 13:51:37 +08001573 spin_lock_irqsave(&device_domain_lock, flags);
1574 list_for_each_entry(info, &domain->devices, link) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001575 if (!info->ats_enabled)
Yu Zhao93a23a72009-05-18 13:51:37 +08001576 continue;
1577
1578 sid = info->bus << 8 | info->devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001579 qdep = info->ats_qdep;
Yu Zhao93a23a72009-05-18 13:51:37 +08001580 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1581 }
1582 spin_unlock_irqrestore(&device_domain_lock, flags);
1583}
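/*
 * Example of the source-id computed above: a device at bus 0x3a,
 * devfn 0x10 yields sid == (0x3a << 8) | 0x10 == 0x3a10.  'qdep' is the
 * ATS invalidate queue depth that was read from the device when ATS was
 * enabled (info->ats_qdep).
 */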
1584
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001585static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1586 struct dmar_domain *domain,
1587 unsigned long pfn, unsigned int pages,
1588 int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001589{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001590 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001591 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001592 u16 did = domain->iommu_did[iommu->seq_id];
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001593
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001594 BUG_ON(pages == 0);
1595
David Woodhouseea8ea462014-03-05 17:09:32 +00001596 if (ih)
1597 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001598 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001599 * Fall back to domain-selective flush if there is no PSI support or the size is
1600 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001601 * PSI requires page size to be 2 ^ x, and the base address is naturally
1602 * aligned to the size
1603 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001604 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1605 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001606 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001607 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001608 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001609 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001610
1611 /*
Nadav Amit82653632010-04-01 13:24:40 +03001612 * In caching mode, changes of pages from non-present to present require
1613 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001614 */
Nadav Amit82653632010-04-01 13:24:40 +03001615 if (!cap_caching_mode(iommu->cap) || !map)
Joerg Roedel9452d5b2015-07-21 10:00:56 +02001616 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1617 addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001618}
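/*
 * Worked example for the mask computation above: pages == 5 gives
 * __roundup_pow_of_two(5) == 8 and mask == 3, so an aligned 8-page
 * (32KiB with 4KiB pages) region containing 'pfn' is invalidated.  If
 * mask exceeds cap_max_amask_val(), or PSI is not supported at all, a
 * domain-selective flush is issued instead.
 */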
1619
mark grossf8bab732008-02-08 04:18:38 -08001620static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1621{
1622 u32 pmen;
1623 unsigned long flags;
1624
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001625 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001626 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1627 pmen &= ~DMA_PMEN_EPM;
1628 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1629
1630 /* wait for the protected region status bit to clear */
1631 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1632 readl, !(pmen & DMA_PMEN_PRS), pmen);
1633
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001634 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001635}
1636
Jiang Liu2a41cce2014-07-11 14:19:33 +08001637static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638{
1639 u32 sts;
1640 unsigned long flags;
1641
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001642 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001643 iommu->gcmd |= DMA_GCMD_TE;
1644 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645
1646 /* Make sure hardware completes it */
1647 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001648 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001649
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001650 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651}
1652
Jiang Liu2a41cce2014-07-11 14:19:33 +08001653static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001654{
1655 u32 sts;
1656 unsigned long flag;
1657
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001658 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001659 iommu->gcmd &= ~DMA_GCMD_TE;
1660 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1661
1662 /* Make sure hardware completes it */
1663 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001664 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001665
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001666 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667}
1668
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001669
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001670static int iommu_init_domains(struct intel_iommu *iommu)
1671{
Joerg Roedel8bf47812015-07-21 10:41:21 +02001672 u32 ndomains, nlongs;
1673 size_t size;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674
1675 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001676 pr_debug("%s: Number of Domains supported <%d>\n",
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001677 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678 nlongs = BITS_TO_LONGS(ndomains);
1679
Donald Dutile94a91b502009-08-20 16:51:34 -04001680 spin_lock_init(&iommu->lock);
1681
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001682 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1683 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001684 pr_err("%s: Allocating domain id array failed\n",
1685 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001686 return -ENOMEM;
1687 }
Joerg Roedel8bf47812015-07-21 10:41:21 +02001688
Wei Yang86f004c2016-05-21 02:41:51 +00001689 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001690 iommu->domains = kzalloc(size, GFP_KERNEL);
1691
1692 if (iommu->domains) {
1693 size = 256 * sizeof(struct dmar_domain *);
1694 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1695 }
1696
1697 if (!iommu->domains || !iommu->domains[0]) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001698 pr_err("%s: Allocating domain array failed\n",
1699 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001700 kfree(iommu->domain_ids);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001701 kfree(iommu->domains);
Jiang Liu852bdb02014-01-06 14:18:11 +08001702 iommu->domain_ids = NULL;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001703 iommu->domains = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001704 return -ENOMEM;
1705 }
1706
Joerg Roedel8bf47812015-07-21 10:41:21 +02001707
1708
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001709 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001710 * If Caching mode is set, then invalid translations are tagged
1711 * with domain-id 0, hence we need to pre-allocate it. We also
1712 * use domain-id 0 as a marker for non-allocated domain-id, so
1713 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001715 set_bit(0, iommu->domain_ids);
1716
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717 return 0;
1718}
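/*
 * Sizing example: with cap_ndoms() == 65536 the id bitmap needs 65536
 * bits, and iommu->domains becomes a two-level array with
 * 65536 / 256 == 256 top-level slots, each able to point at a chunk of
 * 256 struct dmar_domain pointers.  Only chunk 0 is allocated up front
 * here.
 */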
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001719
Jiang Liuffebeb42014-11-09 22:48:02 +08001720static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721{
Joerg Roedel29a27712015-07-21 17:17:12 +02001722 struct device_domain_info *info, *tmp;
Joerg Roedel55d94042015-07-22 16:50:40 +02001723 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001724
Joerg Roedel29a27712015-07-21 17:17:12 +02001725 if (!iommu->domains || !iommu->domain_ids)
1726 return;
Jiang Liua4eaa862014-02-19 14:07:30 +08001727
Joerg Roedelbea64032016-11-08 15:08:26 +01001728again:
Joerg Roedel55d94042015-07-22 16:50:40 +02001729 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001730 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1731 struct dmar_domain *domain;
1732
1733 if (info->iommu != iommu)
1734 continue;
1735
1736 if (!info->dev || !info->domain)
1737 continue;
1738
1739 domain = info->domain;
1740
Joerg Roedelbea64032016-11-08 15:08:26 +01001741 __dmar_remove_one_dev_info(info);
Joerg Roedel29a27712015-07-21 17:17:12 +02001742
Joerg Roedelbea64032016-11-08 15:08:26 +01001743 if (!domain_type_is_vm_or_si(domain)) {
1744 /*
1745 * The domain_exit() function can't be called under
1746 * device_domain_lock, as it takes this lock itself.
1747 * So release the lock here and re-run the loop
1748 * afterwards.
1749 */
1750 spin_unlock_irqrestore(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001751 domain_exit(domain);
Joerg Roedelbea64032016-11-08 15:08:26 +01001752 goto again;
1753 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754 }
Joerg Roedel55d94042015-07-22 16:50:40 +02001755 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001756
1757 if (iommu->gcmd & DMA_GCMD_TE)
1758 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001759}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001760
Jiang Liuffebeb42014-11-09 22:48:02 +08001761static void free_dmar_iommu(struct intel_iommu *iommu)
1762{
1763 if ((iommu->domains) && (iommu->domain_ids)) {
Wei Yang86f004c2016-05-21 02:41:51 +00001764 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001765 int i;
1766
1767 for (i = 0; i < elems; i++)
1768 kfree(iommu->domains[i]);
Jiang Liuffebeb42014-11-09 22:48:02 +08001769 kfree(iommu->domains);
1770 kfree(iommu->domain_ids);
1771 iommu->domains = NULL;
1772 iommu->domain_ids = NULL;
1773 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001774
Weidong Hand9630fe2008-12-08 11:06:32 +08001775 g_iommus[iommu->seq_id] = NULL;
1776
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001777 /* free context mapping */
1778 free_context_table(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001779
1780#ifdef CONFIG_INTEL_IOMMU_SVM
David Woodhousea222a7f2015-10-07 23:35:18 +01001781 if (pasid_enabled(iommu)) {
1782 if (ecap_prs(iommu->ecap))
1783 intel_svm_finish_prq(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001784 intel_svm_free_pasid_tables(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01001785 }
David Woodhouse8a94ade2015-03-24 14:54:56 +00001786#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001787}
1788
Jiang Liuab8dfe22014-07-11 14:19:27 +08001789static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001790{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001791 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792
1793 domain = alloc_domain_mem();
1794 if (!domain)
1795 return NULL;
1796
Jiang Liuab8dfe22014-07-11 14:19:27 +08001797 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001798 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001799 domain->flags = flags;
Omer Peleg0824c592016-04-20 19:03:35 +03001800 domain->has_iotlb_device = false;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001801 INIT_LIST_HEAD(&domain->devices);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001802
1803 return domain;
1804}
1805
Joerg Roedeld160aca2015-07-22 11:52:53 +02001806/* Must be called with iommu->lock */
1807static int domain_attach_iommu(struct dmar_domain *domain,
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001808 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001809{
Jiang Liu44bde612014-07-11 14:19:29 +08001810 unsigned long ndomains;
Joerg Roedel55d94042015-07-22 16:50:40 +02001811 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001812
Joerg Roedel55d94042015-07-22 16:50:40 +02001813 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001814 assert_spin_locked(&iommu->lock);
Jiang Liu44bde612014-07-11 14:19:29 +08001815
Joerg Roedel29a27712015-07-21 17:17:12 +02001816 domain->iommu_refcnt[iommu->seq_id] += 1;
1817 domain->iommu_count += 1;
1818 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
Jiang Liufb170fb2014-07-11 14:19:28 +08001819 ndomains = cap_ndoms(iommu->cap);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001820 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1821
1822 if (num >= ndomains) {
1823 pr_err("%s: No free domain ids\n", iommu->name);
1824 domain->iommu_refcnt[iommu->seq_id] -= 1;
1825 domain->iommu_count -= 1;
Joerg Roedel55d94042015-07-22 16:50:40 +02001826 return -ENOSPC;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001827 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001828
Joerg Roedeld160aca2015-07-22 11:52:53 +02001829 set_bit(num, iommu->domain_ids);
1830 set_iommu_domain(iommu, num, domain);
Jiang Liufb170fb2014-07-11 14:19:28 +08001831
Joerg Roedeld160aca2015-07-22 11:52:53 +02001832 domain->iommu_did[iommu->seq_id] = num;
1833 domain->nid = iommu->node;
1834
Jiang Liufb170fb2014-07-11 14:19:28 +08001835 domain_update_iommu_cap(domain);
1836 }
Joerg Roedeld160aca2015-07-22 11:52:53 +02001837
Joerg Roedel55d94042015-07-22 16:50:40 +02001838 return 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001839}
1840
1841static int domain_detach_iommu(struct dmar_domain *domain,
1842 struct intel_iommu *iommu)
1843{
Joerg Roedeld160aca2015-07-22 11:52:53 +02001844 int num, count = INT_MAX;
Jiang Liufb170fb2014-07-11 14:19:28 +08001845
Joerg Roedel55d94042015-07-22 16:50:40 +02001846 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001847 assert_spin_locked(&iommu->lock);
Jiang Liufb170fb2014-07-11 14:19:28 +08001848
Joerg Roedel29a27712015-07-21 17:17:12 +02001849 domain->iommu_refcnt[iommu->seq_id] -= 1;
1850 count = --domain->iommu_count;
1851 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02001852 num = domain->iommu_did[iommu->seq_id];
1853 clear_bit(num, iommu->domain_ids);
1854 set_iommu_domain(iommu, num, NULL);
1855
Jiang Liufb170fb2014-07-11 14:19:28 +08001856 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001857 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001858 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001859
1860 return count;
1861}
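/*
 * The two helpers above keep a per-IOMMU reference count for the domain:
 * the first attach of a device behind a given IOMMU allocates a
 * domain-id from that IOMMU's bitmap and stores it in
 * domain->iommu_did[], and the last detach for that IOMMU releases the
 * id again.  Both run with device_domain_lock and iommu->lock held, as
 * the assertions document.
 */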
1862
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001863static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001864static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001865
Joseph Cihula51a63e62011-03-21 11:04:24 -07001866static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001867{
1868 struct pci_dev *pdev = NULL;
1869 struct iova *iova;
1870 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001871
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001872 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1873 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001874
Mark Gross8a443df2008-03-04 14:59:31 -08001875 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1876 &reserved_rbtree_key);
1877
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001878 /* IOAPIC ranges shouldn't be accessed by DMA */
1879 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1880 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001881 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001882 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001883 return -ENODEV;
1884 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001885
1886 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1887 for_each_pci_dev(pdev) {
1888 struct resource *r;
1889
1890 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1891 r = &pdev->resource[i];
1892 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1893 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001894 iova = reserve_iova(&reserved_iova_list,
1895 IOVA_PFN(r->start),
1896 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001897 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001898 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001899 return -ENODEV;
1900 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001901 }
1902 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001903 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001904}
1905
1906static void domain_reserve_special_ranges(struct dmar_domain *domain)
1907{
1908 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1909}
1910
1911static inline int guestwidth_to_adjustwidth(int gaw)
1912{
1913 int agaw;
1914 int r = (gaw - 12) % 9;
1915
1916 if (r == 0)
1917 agaw = gaw;
1918 else
1919 agaw = gaw + 9 - r;
1920 if (agaw > 64)
1921 agaw = 64;
1922 return agaw;
1923}
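/*
 * Worked examples: the adjusted width is the guest width rounded up so
 * that (agaw - 12) is a multiple of 9 (one page-table level per 9 bits).
 * gaw 39 and 48 map to themselves, while gaw 40 gives r == 1 and is
 * rounded up to 48.  The result is capped at 64.
 */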
1924
Joerg Roedeldc534b22015-07-22 12:44:02 +02001925static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1926 int guest_width)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001927{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928 int adjust_width, agaw;
1929 unsigned long sagaw;
1930
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001931 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1932 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933 domain_reserve_special_ranges(domain);
1934
1935 /* calculate AGAW */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001936 if (guest_width > cap_mgaw(iommu->cap))
1937 guest_width = cap_mgaw(iommu->cap);
1938 domain->gaw = guest_width;
1939 adjust_width = guestwidth_to_adjustwidth(guest_width);
1940 agaw = width_to_agaw(adjust_width);
1941 sagaw = cap_sagaw(iommu->cap);
1942 if (!test_bit(agaw, &sagaw)) {
1943 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001944 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945 agaw = find_next_bit(&sagaw, 5, agaw);
1946 if (agaw >= 5)
1947 return -ENODEV;
1948 }
1949 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001950
Weidong Han8e6040972008-12-08 15:49:06 +08001951 if (ecap_coherent(iommu->ecap))
1952 domain->iommu_coherency = 1;
1953 else
1954 domain->iommu_coherency = 0;
1955
Sheng Yang58c610b2009-03-18 15:33:05 +08001956 if (ecap_sc_support(iommu->ecap))
1957 domain->iommu_snooping = 1;
1958 else
1959 domain->iommu_snooping = 0;
1960
David Woodhouse214e39a2014-03-19 10:38:49 +00001961 if (intel_iommu_superpage)
1962 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1963 else
1964 domain->iommu_superpage = 0;
1965
Suresh Siddha4c923d42009-10-02 11:01:24 -07001966 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001967
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001969 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001970 if (!domain->pgd)
1971 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001972 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001973 return 0;
1974}
1975
1976static void domain_exit(struct dmar_domain *domain)
1977{
David Woodhouseea8ea462014-03-05 17:09:32 +00001978 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001979
1980 /* Domain 0 is reserved, so don't process it */
1981 if (!domain)
1982 return;
1983
Alex Williamson7b668352011-05-24 12:02:41 +01001984 /* Flush any lazy unmaps that may reference this domain */
Omer Pelegaa473242016-04-20 11:33:02 +03001985 if (!intel_iommu_strict) {
1986 int cpu;
1987
1988 for_each_possible_cpu(cpu)
1989 flush_unmaps_timeout(cpu);
1990 }
Alex Williamson7b668352011-05-24 12:02:41 +01001991
Joerg Roedeld160aca2015-07-22 11:52:53 +02001992 /* Remove associated devices and clear attached or cached domains */
1993 rcu_read_lock();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001994 domain_remove_dev_info(domain);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001995 rcu_read_unlock();
Jiang Liu92d03cc2014-02-19 14:07:28 +08001996
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001997 /* destroy iovas */
1998 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001999
David Woodhouseea8ea462014-03-05 17:09:32 +00002000 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002001
David Woodhouseea8ea462014-03-05 17:09:32 +00002002 dma_free_pagelist(freelist);
2003
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004 free_domain_mem(domain);
2005}
2006
David Woodhouse64ae8922014-03-09 12:52:30 -07002007static int domain_context_mapping_one(struct dmar_domain *domain,
2008 struct intel_iommu *iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002009 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002010{
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002011 u16 did = domain->iommu_did[iommu->seq_id];
Joerg Roedel28ccce02015-07-21 14:45:31 +02002012 int translation = CONTEXT_TT_MULTI_LEVEL;
2013 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002014 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002015 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08002016 struct dma_pte *pgd;
Joerg Roedel55d94042015-07-22 16:50:40 +02002017 int ret, agaw;
Joerg Roedel28ccce02015-07-21 14:45:31 +02002018
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002019 WARN_ON(did == 0);
2020
Joerg Roedel28ccce02015-07-21 14:45:31 +02002021 if (hw_pass_through && domain_type_is_si(domain))
2022 translation = CONTEXT_TT_PASS_THROUGH;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002023
2024 pr_debug("Set context mapping for %02x:%02x.%d\n",
2025 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002026
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002027 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08002028
Joerg Roedel55d94042015-07-22 16:50:40 +02002029 spin_lock_irqsave(&device_domain_lock, flags);
2030 spin_lock(&iommu->lock);
2031
2032 ret = -ENOMEM;
David Woodhouse03ecc322015-02-13 14:35:21 +00002033 context = iommu_context_addr(iommu, bus, devfn, 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002034 if (!context)
Joerg Roedel55d94042015-07-22 16:50:40 +02002035 goto out_unlock;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002036
Joerg Roedel55d94042015-07-22 16:50:40 +02002037 ret = 0;
2038 if (context_present(context))
2039 goto out_unlock;
Joerg Roedelcf484d02015-06-12 12:21:46 +02002040
Xunlei Pangaec0e862016-12-05 20:09:07 +08002041 /*
2042 * For kdump cases, old valid entries may be cached due to the
2043 * in-flight DMA and copied pgtable, but there is no unmapping
2044 * behaviour for them, thus we need an explicit cache flush for
2045 * the newly-mapped device. For kdump, at this point, the device
2046 * is supposed to have finished its reset at driver probe time, so no
2047 * in-flight DMA will exist, and we don't need to worry about it
2048 * hereafter.
2049 */
2050 if (context_copied(context)) {
2051 u16 did_old = context_domain_id(context);
2052
2053 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
2054 iommu->flush.flush_context(iommu, did_old,
2055 (((u16)bus) << 8) | devfn,
2056 DMA_CCMD_MASK_NOBIT,
2057 DMA_CCMD_DEVICE_INVL);
2058 }
2059
Weidong Hanea6606b2008-12-08 23:08:15 +08002060 pgd = domain->pgd;
2061
Joerg Roedelde24e552015-07-21 14:53:04 +02002062 context_clear_entry(context);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002063 context_set_domain_id(context, did);
Weidong Hanea6606b2008-12-08 23:08:15 +08002064
Joerg Roedelde24e552015-07-21 14:53:04 +02002065 /*
2066 * Skip top levels of page tables for an iommu whose agaw is smaller
2067 * than the default. Unnecessary for PT mode.
2068 */
Yu Zhao93a23a72009-05-18 13:51:37 +08002069 if (translation != CONTEXT_TT_PASS_THROUGH) {
Joerg Roedelde24e552015-07-21 14:53:04 +02002070 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
Joerg Roedel55d94042015-07-22 16:50:40 +02002071 ret = -ENOMEM;
Joerg Roedelde24e552015-07-21 14:53:04 +02002072 pgd = phys_to_virt(dma_pte_addr(pgd));
Joerg Roedel55d94042015-07-22 16:50:40 +02002073 if (!dma_pte_present(pgd))
2074 goto out_unlock;
Joerg Roedelde24e552015-07-21 14:53:04 +02002075 }
2076
David Woodhouse64ae8922014-03-09 12:52:30 -07002077 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002078 if (info && info->ats_supported)
2079 translation = CONTEXT_TT_DEV_IOTLB;
2080 else
2081 translation = CONTEXT_TT_MULTI_LEVEL;
Joerg Roedelde24e552015-07-21 14:53:04 +02002082
Yu Zhao93a23a72009-05-18 13:51:37 +08002083 context_set_address_root(context, virt_to_phys(pgd));
2084 context_set_address_width(context, iommu->agaw);
Joerg Roedelde24e552015-07-21 14:53:04 +02002085 } else {
2086 /*
2087 * In pass through mode, AW must be programmed to
2088 * indicate the largest AGAW value supported by
2089 * hardware. And ASR is ignored by hardware.
2090 */
2091 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08002092 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002093
2094 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00002095 context_set_fault_enable(context);
2096 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08002097 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002098
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002099 /*
2100 * It's a non-present to present mapping. If hardware doesn't cache
2101 * non-present entries we only need to flush the write-buffer. If it
2102 * _does_ cache non-present entries, then it does so in the special
2103 * domain #0, which we have to flush:
2104 */
2105 if (cap_caching_mode(iommu->cap)) {
2106 iommu->flush.flush_context(iommu, 0,
2107 (((u16)bus) << 8) | devfn,
2108 DMA_CCMD_MASK_NOBIT,
2109 DMA_CCMD_DEVICE_INVL);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002110 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002111 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002112 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002113 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002114 iommu_enable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08002115
Joerg Roedel55d94042015-07-22 16:50:40 +02002116 ret = 0;
2117
2118out_unlock:
2119 spin_unlock(&iommu->lock);
2120 spin_unlock_irqrestore(&device_domain_lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08002121
Wei Yang5c365d12016-07-13 13:53:21 +00002122 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002123}
2124
Alex Williamson579305f2014-07-03 09:51:43 -06002125struct domain_context_mapping_data {
2126 struct dmar_domain *domain;
2127 struct intel_iommu *iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002128};
2129
2130static int domain_context_mapping_cb(struct pci_dev *pdev,
2131 u16 alias, void *opaque)
2132{
2133 struct domain_context_mapping_data *data = opaque;
2134
2135 return domain_context_mapping_one(data->domain, data->iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002136 PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06002137}
2138
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002139static int
Joerg Roedel28ccce02015-07-21 14:45:31 +02002140domain_context_mapping(struct dmar_domain *domain, struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002141{
David Woodhouse64ae8922014-03-09 12:52:30 -07002142 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002143 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06002144 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002145
David Woodhousee1f167f2014-03-09 15:24:46 -07002146 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002147 if (!iommu)
2148 return -ENODEV;
2149
Alex Williamson579305f2014-07-03 09:51:43 -06002150 if (!dev_is_pci(dev))
Joerg Roedel28ccce02015-07-21 14:45:31 +02002151 return domain_context_mapping_one(domain, iommu, bus, devfn);
Alex Williamson579305f2014-07-03 09:51:43 -06002152
2153 data.domain = domain;
2154 data.iommu = iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002155
2156 return pci_for_each_dma_alias(to_pci_dev(dev),
2157 &domain_context_mapping_cb, &data);
2158}
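/*
 * Note that for a PCI device the context entry is programmed for every
 * possible DMA alias of the device (for example the alias used by a
 * PCIe-to-PCI bridge on behalf of devices behind it), not just for the
 * device's own bus/devfn; that is why pci_for_each_dma_alias() is used
 * above instead of a single domain_context_mapping_one() call.
 */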
2159
2160static int domain_context_mapped_cb(struct pci_dev *pdev,
2161 u16 alias, void *opaque)
2162{
2163 struct intel_iommu *iommu = opaque;
2164
2165 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002166}
2167
David Woodhousee1f167f2014-03-09 15:24:46 -07002168static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002169{
Weidong Han5331fe62008-12-08 23:00:00 +08002170 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002171 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002172
David Woodhousee1f167f2014-03-09 15:24:46 -07002173 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002174 if (!iommu)
2175 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002176
Alex Williamson579305f2014-07-03 09:51:43 -06002177 if (!dev_is_pci(dev))
2178 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002179
Alex Williamson579305f2014-07-03 09:51:43 -06002180 return !pci_for_each_dma_alias(to_pci_dev(dev),
2181 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182}
2183
Fenghua Yuf5329592009-08-04 15:09:37 -07002184/* Returns a number of VTD pages, but aligned to MM page size */
2185static inline unsigned long aligned_nrpages(unsigned long host_addr,
2186 size_t size)
2187{
2188 host_addr &= ~PAGE_MASK;
2189 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2190}
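/*
 * Example (assuming 4KiB pages for both the MM and VT-d): host_addr ==
 * 0x1800 and size == 0x1000 leave a page offset of 0x800, and
 * PAGE_ALIGN(0x800 + 0x1000) == 0x2000, so the function returns 2.
 */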
2191
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002192/* Return largest possible superpage level for a given mapping */
2193static inline int hardware_largepage_caps(struct dmar_domain *domain,
2194 unsigned long iov_pfn,
2195 unsigned long phy_pfn,
2196 unsigned long pages)
2197{
2198 int support, level = 1;
2199 unsigned long pfnmerge;
2200
2201 support = domain->iommu_superpage;
2202
2203 /* To use a large page, the virtual *and* physical addresses
2204 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2205 of them will mean we have to use smaller pages. So just
2206 merge them and check both at once. */
2207 pfnmerge = iov_pfn | phy_pfn;
2208
2209 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2210 pages >>= VTD_STRIDE_SHIFT;
2211 if (!pages)
2212 break;
2213 pfnmerge >>= VTD_STRIDE_SHIFT;
2214 level++;
2215 support--;
2216 }
2217 return level;
2218}
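/*
 * Example: with domain->iommu_superpage >= 1, iov_pfn == 0x200,
 * phy_pfn == 0x1400 and pages == 512, both pfns are 512-page aligned
 * (pfnmerge has no low stride bits set) and a full 2MiB stride fits, so
 * the function returns level 2.
 */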
2219
David Woodhouse9051aa02009-06-29 12:30:54 +01002220static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2221 struct scatterlist *sg, unsigned long phys_pfn,
2222 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002223{
2224 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002225 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002226 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002227 unsigned int largepage_lvl = 0;
2228 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002229
Jiang Liu162d1b12014-07-11 14:19:35 +08002230 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002231
2232 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2233 return -EINVAL;
2234
2235 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2236
Jiang Liucc4f14a2014-11-26 09:42:10 +08002237 if (!sg) {
2238 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002239 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2240 }
2241
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002242 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002243 uint64_t tmp;
2244
David Woodhousee1605492009-06-29 11:17:38 +01002245 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002246 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002247 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2248 sg->dma_length = sg->length;
Dan Williams3e6110f2015-12-15 12:54:06 -08002249 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002250 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002251 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002252
David Woodhousee1605492009-06-29 11:17:38 +01002253 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002254 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2255
David Woodhouse5cf0a762014-03-19 16:07:49 +00002256 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002257 if (!pte)
2258 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002259 /* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002260 if (largepage_lvl > 1) {
Christian Zanderba2374f2015-06-10 09:41:45 -07002261 unsigned long nr_superpages, end_pfn;
2262
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002263 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002264 lvl_pages = lvl_to_nr_pages(largepage_lvl);
Christian Zanderba2374f2015-06-10 09:41:45 -07002265
2266 nr_superpages = sg_res / lvl_pages;
2267 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2268
Jiang Liud41a4ad2014-07-11 14:19:34 +08002269 /*
2270 * Ensure that old small page tables are
Christian Zanderba2374f2015-06-10 09:41:45 -07002271 * removed to make room for superpage(s).
Jiang Liud41a4ad2014-07-11 14:19:34 +08002272 */
Christian Zanderba2374f2015-06-10 09:41:45 -07002273 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002274 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002275 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002276 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002277
David Woodhousee1605492009-06-29 11:17:38 +01002278 }
2279 /* We don't need a lock here; nobody else
2280 * touches the iova range
2281 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002282 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002283 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002284 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002285 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2286 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002287 if (dumps) {
2288 dumps--;
2289 debug_dma_dump_mappings(NULL);
2290 }
2291 WARN_ON(1);
2292 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002293
2294 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2295
2296 BUG_ON(nr_pages < lvl_pages);
2297 BUG_ON(sg_res < lvl_pages);
2298
2299 nr_pages -= lvl_pages;
2300 iov_pfn += lvl_pages;
2301 phys_pfn += lvl_pages;
2302 pteval += lvl_pages * VTD_PAGE_SIZE;
2303 sg_res -= lvl_pages;
2304
2305 /* If the next PTE would be the first in a new page, then we
2306 need to flush the cache on the entries we've just written.
2307 And then we'll need to recalculate 'pte', so clear it and
2308 let it get set again in the if (!pte) block above.
2309
2310 If we're done (!nr_pages) we need to flush the cache too.
2311
2312 Also if we've been setting superpages, we may need to
2313 recalculate 'pte' and switch back to smaller pages for the
2314 end of the mapping, if the trailing size is not enough to
2315 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002316 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002317 if (!nr_pages || first_pte_in_page(pte) ||
2318 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002319 domain_flush_cache(domain, first_pte,
2320 (void *)pte - (void *)first_pte);
2321 pte = NULL;
2322 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002323
2324 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002325 sg = sg_next(sg);
2326 }
2327 return 0;
2328}
2329
David Woodhouse9051aa02009-06-29 12:30:54 +01002330static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2331 struct scatterlist *sg, unsigned long nr_pages,
2332 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002333{
David Woodhouse9051aa02009-06-29 12:30:54 +01002334 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2335}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002336
David Woodhouse9051aa02009-06-29 12:30:54 +01002337static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2338 unsigned long phys_pfn, unsigned long nr_pages,
2339 int prot)
2340{
2341 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002342}
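/*
 * Usage sketch for the two wrappers above: to map nr_pages IOVA pages
 * starting at iov_pfn to physical pages starting at phys_pfn with
 * read/write permission, a caller would do something like
 *
 *	domain_pfn_mapping(domain, iov_pfn, phys_pfn, nr_pages,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * while scatterlist-based callers go through domain_sg_mapping() and let
 * __domain_mapping() pull the physical addresses out of the sg list.
 */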
2343
Joerg Roedel2452d9d2015-07-23 16:20:14 +02002344static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002345{
Weidong Hanc7151a82008-12-08 22:51:37 +08002346 if (!iommu)
2347 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002348
2349 clear_context_table(iommu, bus, devfn);
2350 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002351 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002352 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002353}
2354
David Woodhouse109b9b02012-05-25 17:43:02 +01002355static inline void unlink_domain_info(struct device_domain_info *info)
2356{
2357 assert_spin_locked(&device_domain_lock);
2358 list_del(&info->link);
2359 list_del(&info->global);
2360 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002361 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002362}
2363
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002364static void domain_remove_dev_info(struct dmar_domain *domain)
2365{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002366 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002367 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002368
2369 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel76f45fe2015-07-21 18:25:11 +02002370 list_for_each_entry_safe(info, tmp, &domain->devices, link)
Joerg Roedel127c7612015-07-23 17:44:46 +02002371 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002372 spin_unlock_irqrestore(&device_domain_lock, flags);
2373}
2374
2375/*
2376 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002377 * Note: struct device->archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002378 */
David Woodhouse1525a292014-03-06 16:19:30 +00002379static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002380{
2381 struct device_domain_info *info;
2382
2383 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002384 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002385 if (info)
2386 return info->domain;
2387 return NULL;
2388}
2389
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002390static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002391dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2392{
2393 struct device_domain_info *info;
2394
2395 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002396 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002397 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002398 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002399
2400 return NULL;
2401}
2402
Joerg Roedel5db31562015-07-22 12:40:43 +02002403static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2404 int bus, int devfn,
2405 struct device *dev,
2406 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002407{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002408 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002409 struct device_domain_info *info;
2410 unsigned long flags;
Joerg Roedeld160aca2015-07-22 11:52:53 +02002411 int ret;
Jiang Liu745f2582014-02-19 14:07:26 +08002412
2413 info = alloc_devinfo_mem();
2414 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002415 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002416
Jiang Liu745f2582014-02-19 14:07:26 +08002417 info->bus = bus;
2418 info->devfn = devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002419 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2420 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2421 info->ats_qdep = 0;
Jiang Liu745f2582014-02-19 14:07:26 +08002422 info->dev = dev;
2423 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002424 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002425
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002426 if (dev && dev_is_pci(dev)) {
2427 struct pci_dev *pdev = to_pci_dev(info->dev);
2428
2429 if (ecap_dev_iotlb_support(iommu->ecap) &&
2430 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2431 dmar_find_matched_atsr_unit(pdev))
2432 info->ats_supported = 1;
2433
2434 if (ecs_enabled(iommu)) {
2435 if (pasid_enabled(iommu)) {
2436 int features = pci_pasid_features(pdev);
2437 if (features >= 0)
2438 info->pasid_supported = features | 1;
2439 }
2440
2441 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2442 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2443 info->pri_supported = 1;
2444 }
2445 }
2446
Jiang Liu745f2582014-02-19 14:07:26 +08002447 spin_lock_irqsave(&device_domain_lock, flags);
2448 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002449 found = find_domain(dev);
Joerg Roedelf303e502015-07-23 18:37:13 +02002450
2451 if (!found) {
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002452 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002453 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
Joerg Roedelf303e502015-07-23 18:37:13 +02002454 if (info2) {
2455 found = info2->domain;
2456 info2->dev = dev;
2457 }
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002458 }
Joerg Roedelf303e502015-07-23 18:37:13 +02002459
Jiang Liu745f2582014-02-19 14:07:26 +08002460 if (found) {
2461 spin_unlock_irqrestore(&device_domain_lock, flags);
2462 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002463 /* Caller must free the original domain */
2464 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002465 }
2466
Joerg Roedeld160aca2015-07-22 11:52:53 +02002467 spin_lock(&iommu->lock);
2468 ret = domain_attach_iommu(domain, iommu);
2469 spin_unlock(&iommu->lock);
2470
2471 if (ret) {
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002472 spin_unlock_irqrestore(&device_domain_lock, flags);
Sudip Mukherjee499f3aa2015-09-18 16:27:07 +05302473 free_devinfo_mem(info);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002474 return NULL;
2475 }
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002476
David Woodhouseb718cd32014-03-09 13:11:33 -07002477 list_add(&info->link, &domain->devices);
2478 list_add(&info->global, &device_domain_list);
2479 if (dev)
2480 dev->archdata.iommu = info;
2481 spin_unlock_irqrestore(&device_domain_lock, flags);
2482
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002483 if (dev && domain_context_mapping(domain, dev)) {
2484 pr_err("Domain context map for %s failed\n", dev_name(dev));
Joerg Roedele6de0f82015-07-22 16:30:36 +02002485 dmar_remove_one_dev_info(domain, dev);
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002486 return NULL;
2487 }
2488
David Woodhouseb718cd32014-03-09 13:11:33 -07002489 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002490}
2491
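/*
 * Editor's note (descriptive comment, not in the original source):
 * pci_for_each_dma_alias() callback, used as
 * pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias); it records each
 * alias it is handed, so *opaque ends up holding the last alias reported.
 */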
Alex Williamson579305f2014-07-03 09:51:43 -06002492static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2493{
2494 *(u16 *)opaque = alias;
2495 return 0;
2496}
2497
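/*
 * Editor's note (descriptive comment, not in the original source):
 * Find the dmar_domain already used by @dev's DMA alias, if any; otherwise
 * allocate a fresh domain and initialize it for @gaw bits of address width.
 * The returned domain is not yet attached to @dev itself; that is done by
 * set_domain_for_dev().
 */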
Joerg Roedel76208352016-08-25 14:25:12 +02002498static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002499{
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002500 struct device_domain_info *info = NULL;
Joerg Roedel76208352016-08-25 14:25:12 +02002501 struct dmar_domain *domain = NULL;
Alex Williamson579305f2014-07-03 09:51:43 -06002502 struct intel_iommu *iommu;
Joerg Roedel08a7f452015-07-23 18:09:11 +02002503 u16 req_id, dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002504 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002505 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002506
David Woodhouse146922e2014-03-09 15:44:17 -07002507 iommu = device_to_iommu(dev, &bus, &devfn);
2508 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002509 return NULL;
2510
Joerg Roedel08a7f452015-07-23 18:09:11 +02002511 req_id = ((u16)bus << 8) | devfn;
2512
Alex Williamson579305f2014-07-03 09:51:43 -06002513 if (dev_is_pci(dev)) {
2514 struct pci_dev *pdev = to_pci_dev(dev);
2515
2516 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2517
2518 spin_lock_irqsave(&device_domain_lock, flags);
2519 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2520 PCI_BUS_NUM(dma_alias),
2521 dma_alias & 0xff);
2522 if (info) {
2523 iommu = info->iommu;
2524 domain = info->domain;
2525 }
2526 spin_unlock_irqrestore(&device_domain_lock, flags);
2527
Joerg Roedel76208352016-08-25 14:25:12 +02002528 /* DMA alias already has a domain, use it */
Alex Williamson579305f2014-07-03 09:51:43 -06002529 if (info)
Joerg Roedel76208352016-08-25 14:25:12 +02002530 goto out;
Alex Williamson579305f2014-07-03 09:51:43 -06002531 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002532
David Woodhouse146922e2014-03-09 15:44:17 -07002533 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002534 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002535 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002536 return NULL;
Joerg Roedeldc534b22015-07-22 12:44:02 +02002537 if (domain_init(domain, iommu, gaw)) {
Alex Williamson579305f2014-07-03 09:51:43 -06002538 domain_exit(domain);
2539 return NULL;
2540 }
2541
Joerg Roedel76208352016-08-25 14:25:12 +02002542out:
Alex Williamson579305f2014-07-03 09:51:43 -06002543
Joerg Roedel76208352016-08-25 14:25:12 +02002544 return domain;
2545}
2546
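/*
 * Editor's note (descriptive comment, not in the original source):
 * Attach @domain to @dev (and, for PCI, to its DMA alias first when the
 * alias differs from the device's own request ID).  Returns the domain that
 * ended up attached, which may differ from @domain if someone else beat us
 * to it, or NULL on failure.
 */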
2547static struct dmar_domain *set_domain_for_dev(struct device *dev,
2548 struct dmar_domain *domain)
2549{
2550 struct intel_iommu *iommu;
2551 struct dmar_domain *tmp;
2552 u16 req_id, dma_alias;
2553 u8 bus, devfn;
2554
2555 iommu = device_to_iommu(dev, &bus, &devfn);
2556 if (!iommu)
2557 return NULL;
2558
2559 req_id = ((u16)bus << 8) | devfn;
2560
2561 if (dev_is_pci(dev)) {
2562 struct pci_dev *pdev = to_pci_dev(dev);
2563
2564 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2565
2566 /* register PCI DMA alias device */
2567 if (req_id != dma_alias) {
2568 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2569 dma_alias & 0xff, NULL, domain);
2570
2571 if (!tmp || tmp != domain)
2572 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002573 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002574 }
2575
Joerg Roedel5db31562015-07-22 12:40:43 +02002576 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
Joerg Roedel76208352016-08-25 14:25:12 +02002577 if (!tmp || tmp != domain)
2578 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002579
Joerg Roedel76208352016-08-25 14:25:12 +02002580 return domain;
2581}
2582
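/*
 * Editor's note (descriptive comment, not in the original source):
 * Return the dmar_domain for @dev, allocating and attaching a new one with
 * address width @gaw if the device does not have one yet.  If attaching
 * races with another caller, the freshly allocated domain is released and
 * the winner's domain is returned instead.
 */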
2583static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2584{
2585 struct dmar_domain *domain, *tmp;
2586
2587 domain = find_domain(dev);
2588 if (domain)
2589 goto out;
2590
2591 domain = find_or_alloc_domain(dev, gaw);
2592 if (!domain)
2593 goto out;
2594
2595 tmp = set_domain_for_dev(dev, domain);
2596 if (!tmp || domain != tmp) {
Alex Williamson579305f2014-07-03 09:51:43 -06002597 domain_exit(domain);
2598 domain = tmp;
2599 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002600
Joerg Roedel76208352016-08-25 14:25:12 +02002601out:
2602
David Woodhouseb718cd32014-03-09 13:11:33 -07002603 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002604}
2605
David Woodhouseb2132032009-06-26 18:50:28 +01002606static int iommu_domain_identity_map(struct dmar_domain *domain,
2607 unsigned long long start,
2608 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002609{
David Woodhousec5395d52009-06-28 16:35:56 +01002610 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2611 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002612
David Woodhousec5395d52009-06-28 16:35:56 +01002613 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2614 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002615 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002616 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002617 }
2618
Joerg Roedelaf1089c2015-07-21 15:45:19 +02002619 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002620 /*
2621 * RMRR range might have overlap with physical memory range,
2622 * clear it first
2623 */
David Woodhousec5395d52009-06-28 16:35:56 +01002624 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002625
David Woodhousec5395d52009-06-28 16:35:56 +01002626 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2627 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002628 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002629}
2630
Joerg Roedeld66ce542015-09-23 19:00:10 +02002631static int domain_prepare_identity_map(struct device *dev,
2632 struct dmar_domain *domain,
2633 unsigned long long start,
2634 unsigned long long end)
David Woodhouseb2132032009-06-26 18:50:28 +01002635{
David Woodhouse19943b02009-08-04 16:19:20 +01002636 /* For _hardware_ passthrough, don't bother. But for software
2637 passthrough, we do it anyway -- it may indicate a memory
2638	   range which is reserved in E820 and thus didn't get set
2639	   up to start with in si_domain */
2640 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002641 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2642 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002643 return 0;
2644 }
2645
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002646 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2647 dev_name(dev), start, end);
2648
David Woodhouse5595b522009-12-02 09:21:55 +00002649 if (end < start) {
2650 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2651 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2652 dmi_get_system_info(DMI_BIOS_VENDOR),
2653 dmi_get_system_info(DMI_BIOS_VERSION),
2654 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002655 return -EIO;
David Woodhouse5595b522009-12-02 09:21:55 +00002656 }
2657
David Woodhouse2ff729f2009-08-26 14:25:41 +01002658 if (end >> agaw_to_width(domain->agaw)) {
2659 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2660 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2661 agaw_to_width(domain->agaw),
2662 dmi_get_system_info(DMI_BIOS_VENDOR),
2663 dmi_get_system_info(DMI_BIOS_VERSION),
2664 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002665 return -EIO;
David Woodhouse2ff729f2009-08-26 14:25:41 +01002666 }
David Woodhouse19943b02009-08-04 16:19:20 +01002667
Joerg Roedeld66ce542015-09-23 19:00:10 +02002668 return iommu_domain_identity_map(domain, start, end);
2669}
2670
2671static int iommu_prepare_identity_map(struct device *dev,
2672 unsigned long long start,
2673 unsigned long long end)
2674{
2675 struct dmar_domain *domain;
2676 int ret;
2677
2678 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2679 if (!domain)
2680 return -ENOMEM;
2681
2682 ret = domain_prepare_identity_map(dev, domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002683 if (ret)
Joerg Roedeld66ce542015-09-23 19:00:10 +02002684 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002685
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002686 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002687}
2688
2689static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002690 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002691{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002692 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002693 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002694 return iommu_prepare_identity_map(dev, rmrr->base_address,
2695 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002696}
2697
Suresh Siddhad3f13812011-08-23 17:05:25 -07002698#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002699static inline void iommu_prepare_isa(void)
2700{
2701 struct pci_dev *pdev;
2702 int ret;
2703
2704 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2705 if (!pdev)
2706 return;
2707
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002708 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002709 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002710
2711 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002712 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002713
Yijing Wang9b27e822014-05-20 20:37:52 +08002714 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002715}
2716#else
2717static inline void iommu_prepare_isa(void)
2718{
2719 return;
2720}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002721#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002722
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002723static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002724
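/*
 * Editor's note (descriptive comment, not in the original source):
 * Allocate and set up the static identity (si) domain.  For hardware
 * pass-through nothing needs to be mapped; for software pass-through,
 * identity-map every usable physical memory range of every online node.
 */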
Matt Kraai071e1372009-08-23 22:30:22 -07002725static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002726{
David Woodhousec7ab48d2009-06-26 19:10:36 +01002727 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002728
Jiang Liuab8dfe22014-07-11 14:19:27 +08002729 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002730 if (!si_domain)
2731 return -EFAULT;
2732
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002733 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2734 domain_exit(si_domain);
2735 return -EFAULT;
2736 }
2737
Joerg Roedel0dc79712015-07-21 15:40:06 +02002738 pr_debug("Identity mapping domain allocated\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002739
David Woodhouse19943b02009-08-04 16:19:20 +01002740 if (hw)
2741 return 0;
2742
David Woodhousec7ab48d2009-06-26 19:10:36 +01002743 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002744 unsigned long start_pfn, end_pfn;
2745 int i;
2746
2747 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2748 ret = iommu_domain_identity_map(si_domain,
2749 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2750 if (ret)
2751 return ret;
2752 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002753 }
2754
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755 return 0;
2756}
2757
David Woodhouse9b226622014-03-09 14:03:28 -07002758static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002759{
2760 struct device_domain_info *info;
2761
2762 if (likely(!iommu_identity_mapping))
2763 return 0;
2764
David Woodhouse9b226622014-03-09 14:03:28 -07002765 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002766 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2767 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002768
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002769 return 0;
2770}
2771
Joerg Roedel28ccce02015-07-21 14:45:31 +02002772static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002773{
David Woodhouse0ac72662014-03-09 13:19:22 -07002774 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002775 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002776 u8 bus, devfn;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002777
David Woodhouse5913c9b2014-03-09 16:27:31 -07002778 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002779 if (!iommu)
2780 return -ENODEV;
2781
Joerg Roedel5db31562015-07-22 12:40:43 +02002782 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002783 if (ndomain != domain)
2784 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002785
2786 return 0;
2787}
2788
David Woodhouse0b9d9752014-03-09 15:48:15 -07002789static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002790{
2791 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002792 struct device *tmp;
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002793 int i;
2794
Jiang Liu0e242612014-02-19 14:07:34 +08002795 rcu_read_lock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002796 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002797 /*
2798 * Return TRUE if this RMRR contains the device that
2799 * is passed in.
2800 */
2801 for_each_active_dev_scope(rmrr->devices,
2802 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002803 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002804 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002805 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002806 }
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002807 }
Jiang Liu0e242612014-02-19 14:07:34 +08002808 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002809 return false;
2810}
2811
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002812/*
2813 * There are a couple of cases where we need to restrict the functionality of
2814 * devices associated with RMRRs. The first is when evaluating a device for
2815 * identity mapping because problems exist when devices are moved in and out
2816 * of domains and their respective RMRR information is lost. This means that
2817 * a device with associated RMRRs will never be in a "passthrough" domain.
2818 * The second is use of the device through the IOMMU API. This interface
2819 * expects to have full control of the IOVA space for the device. We cannot
2820 * satisfy both the requirement that RMRR access is maintained and have an
2821 * unencumbered IOVA space. We also have no ability to quiesce the device's
2822 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2823 * We therefore prevent devices associated with an RMRR from participating in
2824 * the IOMMU API, which eliminates them from device assignment.
2825 *
2826 * In both cases we assume that PCI USB devices with RMRRs have them largely
2827 * for historical reasons and that the RMRR space is not actively used post
2828 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002829 *
2830 * The same exception is made for graphics devices, with the requirement that
2831 * any use of the RMRR regions will be torn down before assigning the device
2832 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002833 */
2834static bool device_is_rmrr_locked(struct device *dev)
2835{
2836 if (!device_has_rmrr(dev))
2837 return false;
2838
2839 if (dev_is_pci(dev)) {
2840 struct pci_dev *pdev = to_pci_dev(dev);
2841
David Woodhouse18436af2015-03-25 15:05:47 +00002842 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002843 return false;
2844 }
2845
2846 return true;
2847}
2848
David Woodhouse3bdb2592014-03-09 16:03:08 -07002849static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002850{
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002851
David Woodhouse3bdb2592014-03-09 16:03:08 -07002852 if (dev_is_pci(dev)) {
2853 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002854
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002855 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002856 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002857
David Woodhouse3bdb2592014-03-09 16:03:08 -07002858 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2859 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002860
David Woodhouse3bdb2592014-03-09 16:03:08 -07002861 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2862 return 1;
2863
2864 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2865 return 0;
2866
2867 /*
2868 * We want to start off with all devices in the 1:1 domain, and
2869 * take them out later if we find they can't access all of memory.
2870 *
2871 * However, we can't do this for PCI devices behind bridges,
2872 * because all PCI devices behind the same bridge will end up
2873 * with the same source-id on their transactions.
2874 *
2875 * Practically speaking, we can't change things around for these
2876 * devices at run-time, because we can't be sure there'll be no
2877 * DMA transactions in flight for any of their siblings.
2878 *
2879 * So PCI devices (unless they're on the root bus) as well as
2880 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2881 * the 1:1 domain, just in _case_ one of their siblings turns out
2882 * not to be able to map all of memory.
2883 */
2884 if (!pci_is_pcie(pdev)) {
2885 if (!pci_is_root_bus(pdev->bus))
2886 return 0;
2887 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2888 return 0;
2889 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2890 return 0;
2891 } else {
2892 if (device_has_rmrr(dev))
2893 return 0;
2894 }
David Woodhouse6941af22009-07-04 18:24:27 +01002895
David Woodhouse3dfc8132009-07-04 19:11:08 +01002896 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002897 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002898 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002899 * take them out of the 1:1 domain later.
2900 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002901 if (!startup) {
2902 /*
2903 * If the device's dma_mask is less than the system's memory
2904 * size then this is not a candidate for identity mapping.
2905 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002906 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002907
David Woodhouse3bdb2592014-03-09 16:03:08 -07002908 if (dev->coherent_dma_mask &&
2909 dev->coherent_dma_mask < dma_mask)
2910 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002911
David Woodhouse3bdb2592014-03-09 16:03:08 -07002912 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002913 }
David Woodhouse6941af22009-07-04 18:24:27 +01002914
2915 return 1;
2916}
2917
David Woodhousecf04eee2014-03-21 16:49:04 +00002918static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2919{
2920 int ret;
2921
2922 if (!iommu_should_identity_map(dev, 1))
2923 return 0;
2924
Joerg Roedel28ccce02015-07-21 14:45:31 +02002925 ret = domain_add_dev_info(si_domain, dev);
David Woodhousecf04eee2014-03-21 16:49:04 +00002926 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002927 pr_info("%s identity mapping for device %s\n",
2928 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002929 else if (ret == -ENODEV)
2930 /* device not associated with an iommu */
2931 ret = 0;
2932
2933 return ret;
2934}
2935
2936
Matt Kraai071e1372009-08-23 22:30:22 -07002937static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002938{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002939 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002940 struct dmar_drhd_unit *drhd;
2941 struct intel_iommu *iommu;
2942 struct device *dev;
2943 int i;
2944 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002945
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002946 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002947 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2948 if (ret)
2949 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002950 }
2951
David Woodhousecf04eee2014-03-21 16:49:04 +00002952 for_each_active_iommu(iommu, drhd)
2953 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2954 struct acpi_device_physical_node *pn;
2955 struct acpi_device *adev;
2956
2957 if (dev->bus != &acpi_bus_type)
2958 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002959
David Woodhousecf04eee2014-03-21 16:49:04 +00002960			adev = to_acpi_device(dev);
2961 mutex_lock(&adev->physical_node_lock);
2962 list_for_each_entry(pn, &adev->physical_node_list, node) {
2963 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2964 if (ret)
2965 break;
2966 }
2967 mutex_unlock(&adev->physical_node_lock);
2968 if (ret)
2969 return ret;
2970 }
2971
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002972 return 0;
2973}
2974
Jiang Liuffebeb42014-11-09 22:48:02 +08002975static void intel_iommu_init_qi(struct intel_iommu *iommu)
2976{
2977 /*
2978 * Start from the sane iommu hardware state.
2979 * If the queued invalidation is already initialized by us
2980 * (for example, while enabling interrupt-remapping) then
2981 * we got the things already rolling from a sane state.
2982 */
2983 if (!iommu->qi) {
2984 /*
2985 * Clear any previous faults.
2986 */
2987 dmar_fault(-1, iommu);
2988 /*
2989 * Disable queued invalidation if supported and already enabled
2990 * before OS handover.
2991 */
2992 dmar_disable_qi(iommu);
2993 }
2994
2995 if (dmar_enable_qi(iommu)) {
2996 /*
2997 * Queued Invalidate not enabled, use Register Based Invalidate
2998 */
2999 iommu->flush.flush_context = __iommu_flush_context;
3000 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003001 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003002 iommu->name);
3003 } else {
3004 iommu->flush.flush_context = qi_flush_context;
3005 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003006 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08003007 }
3008}
3009
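/*
 * Editor's note (descriptive comment, not in the original source):
 * Copy one bus's context table(s) from the old (kdump'd) kernel into newly
 * allocated pages.  In extended mode each bus needs two table pages: entries
 * for devfn 0x00-0x7f come from the lower context table pointer and land in
 * tbl[bus * 2], while entries for devfn 0x80-0xff come from the upper
 * pointer and land in tbl[bus * 2 + 1].  Copied entries get PASID support
 * cleared and are marked as copied, and their domain IDs are reserved in
 * iommu->domain_ids.
 */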
Joerg Roedel091d42e2015-06-12 11:56:10 +02003010static int copy_context_table(struct intel_iommu *iommu,
Dan Williamsdfddb962015-10-09 18:16:46 -04003011 struct root_entry *old_re,
Joerg Roedel091d42e2015-06-12 11:56:10 +02003012 struct context_entry **tbl,
3013 int bus, bool ext)
3014{
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003015 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003016 struct context_entry *new_ce = NULL, ce;
Dan Williamsdfddb962015-10-09 18:16:46 -04003017 struct context_entry *old_ce = NULL;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003018 struct root_entry re;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003019 phys_addr_t old_ce_phys;
3020
3021 tbl_idx = ext ? bus * 2 : bus;
Dan Williamsdfddb962015-10-09 18:16:46 -04003022 memcpy(&re, old_re, sizeof(re));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003023
3024 for (devfn = 0; devfn < 256; devfn++) {
3025 /* First calculate the correct index */
3026 idx = (ext ? devfn * 2 : devfn) % 256;
3027
3028 if (idx == 0) {
3029 /* First save what we may have and clean up */
3030 if (new_ce) {
3031 tbl[tbl_idx] = new_ce;
3032 __iommu_flush_cache(iommu, new_ce,
3033 VTD_PAGE_SIZE);
3034 pos = 1;
3035 }
3036
3037 if (old_ce)
3038 iounmap(old_ce);
3039
3040 ret = 0;
3041 if (devfn < 0x80)
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003042 old_ce_phys = root_entry_lctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003043 else
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003044 old_ce_phys = root_entry_uctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003045
3046 if (!old_ce_phys) {
3047 if (ext && devfn == 0) {
3048 /* No LCTP, try UCTP */
3049 devfn = 0x7f;
3050 continue;
3051 } else {
3052 goto out;
3053 }
3054 }
3055
3056 ret = -ENOMEM;
Dan Williamsdfddb962015-10-09 18:16:46 -04003057 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3058 MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003059 if (!old_ce)
3060 goto out;
3061
3062 new_ce = alloc_pgtable_page(iommu->node);
3063 if (!new_ce)
3064 goto out_unmap;
3065
3066 ret = 0;
3067 }
3068
3069 /* Now copy the context entry */
Dan Williamsdfddb962015-10-09 18:16:46 -04003070 memcpy(&ce, old_ce + idx, sizeof(ce));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003071
Joerg Roedelcf484d02015-06-12 12:21:46 +02003072 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02003073 continue;
3074
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003075 did = context_domain_id(&ce);
3076 if (did >= 0 && did < cap_ndoms(iommu->cap))
3077 set_bit(did, iommu->domain_ids);
3078
Joerg Roedelcf484d02015-06-12 12:21:46 +02003079 /*
3080 * We need a marker for copied context entries. This
3081 * marker needs to work for the old format as well as
3082 * for extended context entries.
3083 *
3084 * Bit 67 of the context entry is used. In the old
3085 * format this bit is available to software, in the
3086 * extended format it is the PGE bit, but PGE is ignored
3087 * by HW if PASIDs are disabled (and thus still
3088 * available).
3089 *
3090 * So disable PASIDs first and then mark the entry
3091 * copied. This means that we don't copy PASID
3092 * translations from the old kernel, but this is fine as
3093 * faults there are not fatal.
3094 */
3095 context_clear_pasid_enable(&ce);
3096 context_set_copied(&ce);
3097
Joerg Roedel091d42e2015-06-12 11:56:10 +02003098 new_ce[idx] = ce;
3099 }
3100
3101 tbl[tbl_idx + pos] = new_ce;
3102
3103 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3104
3105out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003106 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003107
3108out:
3109 return ret;
3110}
3111
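/*
 * Editor's note (descriptive comment, not in the original source):
 * Take over the translation structures left behind by the previous kernel
 * (kdump case): read the old root table address from DMAR_RTADDR_REG, bail
 * out if the extended-context setting would have to change, copy every
 * bus's context tables, and then hook the copies into our own root entry
 * table under iommu->lock.
 */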
3112static int copy_translation_tables(struct intel_iommu *iommu)
3113{
3114 struct context_entry **ctxt_tbls;
Dan Williamsdfddb962015-10-09 18:16:46 -04003115 struct root_entry *old_rt;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003116 phys_addr_t old_rt_phys;
3117 int ctxt_table_entries;
3118 unsigned long flags;
3119 u64 rtaddr_reg;
3120 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02003121 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003122
3123 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3124 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02003125 new_ext = !!ecap_ecs(iommu->ecap);
3126
3127 /*
3128 * The RTT bit can only be changed when translation is disabled,
3129 * but disabling translation means to open a window for data
3130 * corruption. So bail out and don't copy anything if we would
3131 * have to change the bit.
3132 */
3133 if (new_ext != ext)
3134 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003135
3136 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3137 if (!old_rt_phys)
3138 return -EINVAL;
3139
Dan Williamsdfddb962015-10-09 18:16:46 -04003140 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003141 if (!old_rt)
3142 return -ENOMEM;
3143
3144 /* This is too big for the stack - allocate it from slab */
3145 ctxt_table_entries = ext ? 512 : 256;
3146 ret = -ENOMEM;
3147 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3148 if (!ctxt_tbls)
3149 goto out_unmap;
3150
3151 for (bus = 0; bus < 256; bus++) {
3152 ret = copy_context_table(iommu, &old_rt[bus],
3153 ctxt_tbls, bus, ext);
3154 if (ret) {
3155 pr_err("%s: Failed to copy context table for bus %d\n",
3156 iommu->name, bus);
3157 continue;
3158 }
3159 }
3160
3161 spin_lock_irqsave(&iommu->lock, flags);
3162
3163 /* Context tables are copied, now write them to the root_entry table */
3164 for (bus = 0; bus < 256; bus++) {
3165 int idx = ext ? bus * 2 : bus;
3166 u64 val;
3167
3168 if (ctxt_tbls[idx]) {
3169 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3170 iommu->root_entry[bus].lo = val;
3171 }
3172
3173 if (!ext || !ctxt_tbls[idx + 1])
3174 continue;
3175
3176 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3177 iommu->root_entry[bus].hi = val;
3178 }
3179
3180 spin_unlock_irqrestore(&iommu->lock, flags);
3181
3182 kfree(ctxt_tbls);
3183
3184 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3185
3186 ret = 0;
3187
3188out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003189 memunmap(old_rt);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003190
3191 return ret;
3192}
3193
Joseph Cihulab7792602011-05-03 00:08:37 -07003194static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003195{
3196 struct dmar_drhd_unit *drhd;
3197 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003198 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003199 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003200 struct intel_iommu *iommu;
Omer Pelegaa473242016-04-20 11:33:02 +03003201 int i, ret, cpu;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003202
3203 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003204 * for each drhd
3205 * allocate root
3206 * initialize and program root entry to not present
3207 * endfor
3208 */
3209 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003210 /*
3211		 * lock not needed as this is only incremented in the
3212		 * single-threaded kernel __init code path; all other
3213		 * accesses are read-only
3214 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003215 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003216 g_num_of_iommus++;
3217 continue;
3218 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003219 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003220 }
3221
Jiang Liuffebeb42014-11-09 22:48:02 +08003222 /* Preallocate enough resources for IOMMU hot-addition */
3223 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3224 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3225
Weidong Hand9630fe2008-12-08 11:06:32 +08003226 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3227 GFP_KERNEL);
3228 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003229 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003230 ret = -ENOMEM;
3231 goto error;
3232 }
3233
Omer Pelegaa473242016-04-20 11:33:02 +03003234 for_each_possible_cpu(cpu) {
3235 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3236 cpu);
3237
3238 dfd->tables = kzalloc(g_num_of_iommus *
3239 sizeof(struct deferred_flush_table),
3240 GFP_KERNEL);
3241 if (!dfd->tables) {
3242 ret = -ENOMEM;
3243 goto free_g_iommus;
3244 }
3245
3246 spin_lock_init(&dfd->lock);
3247 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
mark gross5e0d2a62008-03-04 15:22:08 -08003248 }
3249
Jiang Liu7c919772014-01-06 14:18:18 +08003250 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003251 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003252
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003253 intel_iommu_init_qi(iommu);
3254
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003255 ret = iommu_init_domains(iommu);
3256 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003257 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003258
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003259 init_translation_status(iommu);
3260
Joerg Roedel091d42e2015-06-12 11:56:10 +02003261 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3262 iommu_disable_translation(iommu);
3263 clear_translation_pre_enabled(iommu);
3264 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3265 iommu->name);
3266 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003267
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003268 /*
3269 * TBD:
3270 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003271	 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003272 */
3273 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003274 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003275 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003276
Joerg Roedel091d42e2015-06-12 11:56:10 +02003277 if (translation_pre_enabled(iommu)) {
3278 pr_info("Translation already enabled - trying to copy translation structures\n");
3279
3280 ret = copy_translation_tables(iommu);
3281 if (ret) {
3282 /*
3283 * We found the IOMMU with translation
3284 * enabled - but failed to copy over the
3285 * old root-entry table. Try to proceed
3286 * by disabling translation now and
3287 * allocating a clean root-entry table.
3288 * This might cause DMAR faults, but
3289 * probably the dump will still succeed.
3290 */
3291 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3292 iommu->name);
3293 iommu_disable_translation(iommu);
3294 clear_translation_pre_enabled(iommu);
3295 } else {
3296 pr_info("Copied translation tables from previous kernel for %s\n",
3297 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003298 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003299 }
3300 }
3301
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003302 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003303 hw_pass_through = 0;
David Woodhouse8a94ade2015-03-24 14:54:56 +00003304#ifdef CONFIG_INTEL_IOMMU_SVM
3305 if (pasid_enabled(iommu))
3306 intel_svm_alloc_pasid_tables(iommu);
3307#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308 }
3309
Joerg Roedela4c34ff2016-06-17 11:29:48 +02003310 /*
3311 * Now that qi is enabled on all iommus, set the root entry and flush
3312 * caches. This is required on some Intel X58 chipsets, otherwise the
3313 * flush_context function will loop forever and the boot hangs.
3314 */
3315 for_each_active_iommu(iommu, drhd) {
3316 iommu_flush_write_buffer(iommu);
3317 iommu_set_root_entry(iommu);
3318 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3319 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3320 }
3321
David Woodhouse19943b02009-08-04 16:19:20 +01003322 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003323 iommu_identity_mapping |= IDENTMAP_ALL;
3324
Suresh Siddhad3f13812011-08-23 17:05:25 -07003325#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003326 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003327#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003328
Ashok Raj21e722c2017-01-30 09:39:53 -08003329 check_tylersburg_isoch();
3330
Joerg Roedel86080cc2015-06-12 12:27:16 +02003331 if (iommu_identity_mapping) {
3332 ret = si_domain_init(hw_pass_through);
3333 if (ret)
3334 goto free_iommu;
3335 }
3336
David Woodhousee0fc7e02009-09-30 09:12:17 -07003337
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003338 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003339 * If we copied translations from a previous kernel in the kdump
3340	 * case, we cannot assign the devices to domains now, as that
3341 * would eliminate the old mappings. So skip this part and defer
3342 * the assignment to device driver initialization time.
3343 */
3344 if (copied_tables)
3345 goto domains_done;
3346
3347 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003348	 * If pass-through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003349	 * identity mappings for RMRR, GFX and ISA devices, and possibly fall
3350	 * back to static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003351 */
David Woodhouse19943b02009-08-04 16:19:20 +01003352 if (iommu_identity_mapping) {
3353 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3354 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003355 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003356 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003357 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003358 }
David Woodhouse19943b02009-08-04 16:19:20 +01003359 /*
3360 * For each rmrr
3361 * for each dev attached to rmrr
3362 * do
3363 * locate drhd for dev, alloc domain for dev
3364 * allocate free domain
3365 * allocate page table entries for rmrr
3366 * if context not allocated for bus
3367 * allocate and init context
3368 * set present in root table for this bus
3369 * init context with domain, translation etc
3370 * endfor
3371 * endfor
3372 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003373 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003374 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003375		/* some BIOSes list non-existent devices in the DMAR table. */
3376 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003377 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003378 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003379 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003380 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003381 }
3382 }
3383
3384 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003385
Joerg Roedela87f4912015-06-12 12:32:54 +02003386domains_done:
3387
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003388 /*
3389 * for each drhd
3390 * enable fault log
3391 * global invalidate context cache
3392 * global invalidate iotlb
3393 * enable translation
3394 */
Jiang Liu7c919772014-01-06 14:18:18 +08003395 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003396 if (drhd->ignored) {
3397 /*
3398 * we always have to disable PMRs or DMA may fail on
3399 * this device
3400 */
3401 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003402 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003403 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003404 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003405
3406 iommu_flush_write_buffer(iommu);
3407
David Woodhousea222a7f2015-10-07 23:35:18 +01003408#ifdef CONFIG_INTEL_IOMMU_SVM
3409 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3410 ret = intel_svm_enable_prq(iommu);
3411 if (ret)
3412 goto free_iommu;
3413 }
3414#endif
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003415 ret = dmar_set_interrupt(iommu);
3416 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003417 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003418
Joerg Roedel8939ddf2015-06-12 14:40:01 +02003419 if (!translation_pre_enabled(iommu))
3420 iommu_enable_translation(iommu);
3421
David Woodhouseb94996c2009-09-19 15:28:12 -07003422 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003423 }
3424
3425 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003426
3427free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003428 for_each_active_iommu(iommu, drhd) {
3429 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003430 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003431 }
Jiang Liu989d51f2014-02-19 14:07:21 +08003432free_g_iommus:
Omer Pelegaa473242016-04-20 11:33:02 +03003433 for_each_possible_cpu(cpu)
3434 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
Weidong Hand9630fe2008-12-08 11:06:32 +08003435 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08003436error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003437 return ret;
3438}
3439
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003440/* This takes a number of _MM_ pages, not VTD pages */
Omer Peleg2aac6302016-04-20 11:33:57 +03003441static unsigned long intel_alloc_iova(struct device *dev,
David Woodhouse875764d2009-06-28 21:20:51 +01003442 struct dmar_domain *domain,
3443 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003444{
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003445 unsigned long iova_pfn = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003446
David Woodhouse875764d2009-06-28 21:20:51 +01003447 /* Restrict dma_mask to the width that the iommu can handle */
3448 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
Robin Murphy8f6429c2015-07-16 19:40:12 +01003449 /* Ensure we reserve the whole size-aligned region */
3450 nrpages = __roundup_pow_of_two(nrpages);
David Woodhouse875764d2009-06-28 21:20:51 +01003451
3452 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003453 /*
3454 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003455 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003456 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003457 */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003458 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3459 IOVA_PFN(DMA_BIT_MASK(32)));
3460 if (iova_pfn)
3461 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003462 }
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003463 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3464 if (unlikely(!iova_pfn)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003465		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003466 nrpages, dev_name(dev));
Omer Peleg2aac6302016-04-20 11:33:57 +03003467 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003468 }
3469
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003470 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003471}
3472
David Woodhoused4b709f2014-03-09 16:07:40 -07003473static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003474{
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003475 struct dmar_domain *domain, *tmp;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003476 struct dmar_rmrr_unit *rmrr;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003477 struct device *i_dev;
3478 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003479
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003480 domain = find_domain(dev);
3481 if (domain)
3482 goto out;
3483
3484 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3485 if (!domain)
3486 goto out;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003487
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003488 /* We have a new domain - setup possible RMRRs for the device */
3489 rcu_read_lock();
3490 for_each_rmrr_units(rmrr) {
3491 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3492 i, i_dev) {
3493 if (i_dev != dev)
3494 continue;
3495
3496 ret = domain_prepare_identity_map(dev, domain,
3497 rmrr->base_address,
3498 rmrr->end_address);
3499 if (ret)
3500 dev_err(dev, "Mapping reserved region failed\n");
3501 }
3502 }
3503 rcu_read_unlock();
3504
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003505 tmp = set_domain_for_dev(dev, domain);
3506 if (!tmp || domain != tmp) {
3507 domain_exit(domain);
3508 domain = tmp;
3509 }
3510
3511out:
3512
3513 if (!domain)
3514 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3515
3516
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003517 return domain;
3518}
3519
David Woodhoused4b709f2014-03-09 16:07:40 -07003520static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01003521{
3522 struct device_domain_info *info;
3523
3524 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003525 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003526 if (likely(info))
3527 return info->domain;
3528
3529 return __get_valid_domain_for_dev(dev);
3530}
3531
David Woodhouseecb509e2014-03-09 16:29:55 -07003532/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01003533static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003534{
3535 int found;
3536
David Woodhouse3d891942014-03-06 15:59:26 +00003537 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003538 return 1;
3539
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003540 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003541 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003542
David Woodhouse9b226622014-03-09 14:03:28 -07003543 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003544 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003545 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003546 return 1;
3547 else {
3548 /*
3549			 * The 32-bit device is removed from si_domain and falls
3550			 * back to non-identity mapping.
3551 */
Joerg Roedele6de0f82015-07-22 16:30:36 +02003552 dmar_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003553 pr_info("32bit %s uses non-identity mapping\n",
3554 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003555 return 0;
3556 }
3557 } else {
3558 /*
3559		 * If a 64-bit DMA device is detached from a VM, the device
3560		 * is put into si_domain for identity mapping.
3561 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003562 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003563 int ret;
Joerg Roedel28ccce02015-07-21 14:45:31 +02003564 ret = domain_add_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003565 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003566 pr_info("64bit %s uses identity mapping\n",
3567 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003568 return 1;
3569 }
3570 }
3571 }
3572
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003573 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003574}
3575
David Woodhouse5040a912014-03-09 16:14:00 -07003576static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003577 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003578{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003579 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003580 phys_addr_t start_paddr;
Omer Peleg2aac6302016-04-20 11:33:57 +03003581 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003582 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003583 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003584 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003585 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003586
3587 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003588
David Woodhouse5040a912014-03-09 16:14:00 -07003589 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003590 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003591
David Woodhouse5040a912014-03-09 16:14:00 -07003592 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003593 if (!domain)
3594 return 0;
3595
Weidong Han8c11e792008-12-08 15:29:22 +08003596 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003597 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003598
Omer Peleg2aac6302016-04-20 11:33:57 +03003599 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3600 if (!iova_pfn)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003601 goto error;
3602
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003603 /*
3604	 * Check if DMAR supports zero-length reads on write-only
3605	 * mappings.
3606 */
3607 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003608 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003609 prot |= DMA_PTE_READ;
3610 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3611 prot |= DMA_PTE_WRITE;
3612 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003613	 * paddr to (paddr + size) might span only part of a page; we should map
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003614	 * the whole page. Note: if two parts of one page are mapped separately,
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003615	 * we might have two guest addresses mapping to the same host paddr, but
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003616	 * this is not a big problem
3617 */
Omer Peleg2aac6302016-04-20 11:33:57 +03003618 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003619 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003620 if (ret)
3621 goto error;
3622
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003623 /* it's a non-present to present mapping. Only flush if caching mode */
3624 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003625 iommu_flush_iotlb_psi(iommu, domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003626 mm_to_dma_pfn(iova_pfn),
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003627 size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003628 else
Weidong Han8c11e792008-12-08 15:29:22 +08003629 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003630
Omer Peleg2aac6302016-04-20 11:33:57 +03003631 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
David Woodhouse03d6a242009-06-28 15:33:46 +01003632 start_paddr += paddr & ~PAGE_MASK;
3633 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003634
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003635error:
Omer Peleg2aac6302016-04-20 11:33:57 +03003636 if (iova_pfn)
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003637 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003638 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003639 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003640 return 0;
3641}
3642
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003643static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3644 unsigned long offset, size_t size,
3645 enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003646 unsigned long attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003647{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003648 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003649 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003650}
3651
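/*
 * Editor's note (descriptive comment, not in the original source):
 * Drain one CPU's deferred-unmap batch: on real hardware do one global
 * IOTLB flush per IOMMU plus a device-IOTLB flush per entry; in caching
 * mode do a page-selective flush per entry instead.  Then free the queued
 * IOVA ranges and their page freelists.
 */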
Omer Pelegaa473242016-04-20 11:33:02 +03003652static void flush_unmaps(struct deferred_flush_data *flush_data)
mark gross5e0d2a62008-03-04 15:22:08 -08003653{
mark gross80b20dd2008-04-18 13:53:58 -07003654 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003655
Omer Pelegaa473242016-04-20 11:33:02 +03003656 flush_data->timer_on = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003657
3658 /* just flush them all */
3659 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003660 struct intel_iommu *iommu = g_iommus[i];
Omer Pelegaa473242016-04-20 11:33:02 +03003661 struct deferred_flush_table *flush_table =
3662 &flush_data->tables[i];
Weidong Hana2bb8452008-12-08 11:24:12 +08003663 if (!iommu)
3664 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003665
Omer Pelegaa473242016-04-20 11:33:02 +03003666 if (!flush_table->next)
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003667 continue;
3668
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003669 /* In caching mode, global flushes turn emulation expensive */
3670 if (!cap_caching_mode(iommu->cap))
3671 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003672 DMA_TLB_GLOBAL_FLUSH);
Omer Pelegaa473242016-04-20 11:33:02 +03003673 for (j = 0; j < flush_table->next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003674 unsigned long mask;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003675 struct deferred_flush_entry *entry =
Omer Pelegaa473242016-04-20 11:33:02 +03003676 &flush_table->entries[j];
Omer Peleg2aac6302016-04-20 11:33:57 +03003677 unsigned long iova_pfn = entry->iova_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003678 unsigned long nrpages = entry->nrpages;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003679 struct dmar_domain *domain = entry->domain;
3680 struct page *freelist = entry->freelist;
Yu Zhao93a23a72009-05-18 13:51:37 +08003681
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003682 /* On real hardware multiple invalidations are expensive */
3683 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003684 iommu_flush_iotlb_psi(iommu, domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003685 mm_to_dma_pfn(iova_pfn),
Omer Peleg769530e2016-04-20 11:33:25 +03003686 nrpages, !freelist, 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003687 else {
Omer Peleg769530e2016-04-20 11:33:25 +03003688 mask = ilog2(nrpages);
Omer Peleg314f1dc2016-04-20 11:32:45 +03003689 iommu_flush_dev_iotlb(domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003690 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003691 }
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003692 free_iova_fast(&domain->iovad, iova_pfn, nrpages);
Omer Peleg314f1dc2016-04-20 11:32:45 +03003693 if (freelist)
3694 dma_free_pagelist(freelist);
mark gross80b20dd2008-04-18 13:53:58 -07003695 }
Omer Pelegaa473242016-04-20 11:33:02 +03003696 flush_table->next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003697 }
3698
Omer Pelegaa473242016-04-20 11:33:02 +03003699 flush_data->size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003700}
3701
Omer Pelegaa473242016-04-20 11:33:02 +03003702static void flush_unmaps_timeout(unsigned long cpuid)
mark gross5e0d2a62008-03-04 15:22:08 -08003703{
Omer Pelegaa473242016-04-20 11:33:02 +03003704 struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
mark gross80b20dd2008-04-18 13:53:58 -07003705 unsigned long flags;
3706
Omer Pelegaa473242016-04-20 11:33:02 +03003707 spin_lock_irqsave(&flush_data->lock, flags);
3708 flush_unmaps(flush_data);
3709 spin_unlock_irqrestore(&flush_data->lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003710}
3711
Omer Peleg2aac6302016-04-20 11:33:57 +03003712static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003713 unsigned long nrpages, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003714{
3715 unsigned long flags;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003716 int entry_id, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003717 struct intel_iommu *iommu;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003718 struct deferred_flush_entry *entry;
Omer Pelegaa473242016-04-20 11:33:02 +03003719 struct deferred_flush_data *flush_data;
3720 unsigned int cpuid;
mark gross5e0d2a62008-03-04 15:22:08 -08003721
Omer Pelegaa473242016-04-20 11:33:02 +03003722 cpuid = get_cpu();
3723 flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3724
3725 /* Flush all CPUs' entries to avoid deferring too much. If
3726 	 * this becomes a bottleneck, we could flush only the local CPU's
3727 	 * entries and rely on the flush timer for the rest.
3728 */
3729 if (flush_data->size == HIGH_WATER_MARK) {
3730 int cpu;
3731
3732 for_each_online_cpu(cpu)
3733 flush_unmaps_timeout(cpu);
3734 }
3735
3736 spin_lock_irqsave(&flush_data->lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003737
Weidong Han8c11e792008-12-08 15:29:22 +08003738 iommu = domain_get_iommu(dom);
3739 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003740
Omer Pelegaa473242016-04-20 11:33:02 +03003741 entry_id = flush_data->tables[iommu_id].next;
3742 ++(flush_data->tables[iommu_id].next);
mark gross5e0d2a62008-03-04 15:22:08 -08003743
Omer Pelegaa473242016-04-20 11:33:02 +03003744 entry = &flush_data->tables[iommu_id].entries[entry_id];
Omer Peleg314f1dc2016-04-20 11:32:45 +03003745 entry->domain = dom;
Omer Peleg2aac6302016-04-20 11:33:57 +03003746 entry->iova_pfn = iova_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003747 entry->nrpages = nrpages;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003748 entry->freelist = freelist;
mark gross5e0d2a62008-03-04 15:22:08 -08003749
Omer Pelegaa473242016-04-20 11:33:02 +03003750 if (!flush_data->timer_on) {
3751 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3752 flush_data->timer_on = 1;
mark gross5e0d2a62008-03-04 15:22:08 -08003753 }
Omer Pelegaa473242016-04-20 11:33:02 +03003754 flush_data->size++;
3755 spin_unlock_irqrestore(&flush_data->lock, flags);
3756
3757 put_cpu();
mark gross5e0d2a62008-03-04 15:22:08 -08003758}
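/*
 * add_unmap() is the producer side of the deferred flush: it queues the
 * (domain, iova_pfn, nrpages, freelist) tuple in this CPU's table for the
 * owning IOMMU and arms a 10 ms timer if one is not already pending.  Once a
 * CPU has queued HIGH_WATER_MARK entries it synchronously drains every online
 * CPU's queue, so a stale IOVA lives at most roughly 10 ms or until the
 * high-water mark is hit, whichever comes first.
 */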
3759
Omer Peleg769530e2016-04-20 11:33:25 +03003760static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003761{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003762 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003763 unsigned long start_pfn, last_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003764 unsigned long nrpages;
Omer Peleg2aac6302016-04-20 11:33:57 +03003765 unsigned long iova_pfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003766 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003767 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003768
David Woodhouse73676832009-07-04 14:08:36 +01003769 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003770 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003771
David Woodhouse1525a292014-03-06 16:19:30 +00003772 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003773 BUG_ON(!domain);
3774
Weidong Han8c11e792008-12-08 15:29:22 +08003775 iommu = domain_get_iommu(domain);
3776
Omer Peleg2aac6302016-04-20 11:33:57 +03003777 iova_pfn = IOVA_PFN(dev_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003778
Omer Peleg769530e2016-04-20 11:33:25 +03003779 nrpages = aligned_nrpages(dev_addr, size);
Omer Peleg2aac6302016-04-20 11:33:57 +03003780 start_pfn = mm_to_dma_pfn(iova_pfn);
Omer Peleg769530e2016-04-20 11:33:25 +03003781 last_pfn = start_pfn + nrpages - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003782
David Woodhoused794dc92009-06-28 00:27:49 +01003783 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003784 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003785
David Woodhouseea8ea462014-03-05 17:09:32 +00003786 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003787
mark gross5e0d2a62008-03-04 15:22:08 -08003788 if (intel_iommu_strict) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003789 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003790 nrpages, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003791 /* free iova */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003792 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
David Woodhouseea8ea462014-03-05 17:09:32 +00003793 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003794 } else {
Omer Peleg2aac6302016-04-20 11:33:57 +03003795 add_unmap(domain, iova_pfn, nrpages, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003796 /*
3797 		 * queue up the release of the unmap to save the roughly 1/6th of
3798 		 * the CPU time otherwise consumed by the iotlb flush operation...
3799 */
mark gross5e0d2a62008-03-04 15:22:08 -08003800 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003801}
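/*
 * With intel_iommu_strict the IOTLB and dev-IOTLB are invalidated and the
 * IOVA freed before intel_unmap() returns; otherwise the work is handed to
 * add_unmap() and the device may briefly retain a stale translation for the
 * just-unmapped range.  The lazy mode trades that window for a large
 * reduction in invalidation overhead on unmap-heavy workloads.
 */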
3802
Jiang Liud41a4ad2014-07-11 14:19:34 +08003803static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3804 size_t size, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003805 unsigned long attrs)
Jiang Liud41a4ad2014-07-11 14:19:34 +08003806{
Omer Peleg769530e2016-04-20 11:33:25 +03003807 intel_unmap(dev, dev_addr, size);
Jiang Liud41a4ad2014-07-11 14:19:34 +08003808}
3809
David Woodhouse5040a912014-03-09 16:14:00 -07003810static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003811 dma_addr_t *dma_handle, gfp_t flags,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003812 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003813{
Akinobu Mita36746432014-06-04 16:06:51 -07003814 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003815 int order;
3816
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003817 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003818 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003819
David Woodhouse5040a912014-03-09 16:14:00 -07003820 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003821 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003822 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3823 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003824 flags |= GFP_DMA;
3825 else
3826 flags |= GFP_DMA32;
3827 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003828
Mel Gormand0164ad2015-11-06 16:28:21 -08003829 if (gfpflags_allow_blocking(flags)) {
Akinobu Mita36746432014-06-04 16:06:51 -07003830 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003831
Lucas Stach712c6042017-02-24 14:58:44 -08003832 page = dma_alloc_from_contiguous(dev, count, order, flags);
Akinobu Mita36746432014-06-04 16:06:51 -07003833 if (page && iommu_no_mapping(dev) &&
3834 page_to_phys(page) + size > dev->coherent_dma_mask) {
3835 dma_release_from_contiguous(dev, page, count);
3836 page = NULL;
3837 }
3838 }
3839
3840 if (!page)
3841 page = alloc_pages(flags, order);
3842 if (!page)
3843 return NULL;
3844 memset(page_address(page), 0, size);
3845
3846 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003847 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003848 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003849 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003850 return page_address(page);
3851 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3852 __free_pages(page, order);
3853
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003854 return NULL;
3855}
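/*
 * intel_alloc_coherent() prefers CMA (dma_alloc_from_contiguous) when the
 * caller may block, falls back to alloc_pages(), and only then maps the
 * buffer through __intel_map_single().  GFP_DMA/GFP_DMA32 are applied only
 * for devices that bypass translation, since translated devices can be given
 * any physical page.  An illustrative caller (hypothetical driver code, not
 * part of this file) would simply use the generic DMA API:
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, 4096, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, 4096, cpu, handle);
 *
 * which reaches these callbacks through the intel_dma_ops table below.
 */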
3856
David Woodhouse5040a912014-03-09 16:14:00 -07003857static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003858 dma_addr_t dma_handle, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003859{
3860 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003861 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003862
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003863 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003864 order = get_order(size);
3865
Omer Peleg769530e2016-04-20 11:33:25 +03003866 intel_unmap(dev, dma_handle, size);
Akinobu Mita36746432014-06-04 16:06:51 -07003867 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3868 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003869}
3870
David Woodhouse5040a912014-03-09 16:14:00 -07003871static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003872 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003873 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003874{
Omer Peleg769530e2016-04-20 11:33:25 +03003875 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3876 unsigned long nrpages = 0;
3877 struct scatterlist *sg;
3878 int i;
3879
3880 for_each_sg(sglist, sg, nelems, i) {
3881 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3882 }
3883
3884 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003885}
3886
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003887static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003888 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003889{
3890 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003891 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003892
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003893 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003894 BUG_ON(!sg_page(sg));
Dan Williams3e6110f2015-12-15 12:54:06 -08003895 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003896 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003897 }
3898 return nelems;
3899}
3900
David Woodhouse5040a912014-03-09 16:14:00 -07003901static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003902 enum dma_data_direction dir, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003903{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003904 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003905 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003906 size_t size = 0;
3907 int prot = 0;
Omer Peleg2aac6302016-04-20 11:33:57 +03003908 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003909 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003910 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003911 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003912 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003913
3914 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003915 if (iommu_no_mapping(dev))
3916 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003917
David Woodhouse5040a912014-03-09 16:14:00 -07003918 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003919 if (!domain)
3920 return 0;
3921
Weidong Han8c11e792008-12-08 15:29:22 +08003922 iommu = domain_get_iommu(domain);
3923
David Woodhouseb536d242009-06-28 14:49:31 +01003924 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003925 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003926
Omer Peleg2aac6302016-04-20 11:33:57 +03003927 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
David Woodhouse5040a912014-03-09 16:14:00 -07003928 *dev->dma_mask);
Omer Peleg2aac6302016-04-20 11:33:57 +03003929 if (!iova_pfn) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003930 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003931 return 0;
3932 }
3933
3934 /*
3935 * Check if DMAR supports zero-length reads on write only
3936 	 * mappings.
3937 */
3938 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003939 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003940 prot |= DMA_PTE_READ;
3941 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3942 prot |= DMA_PTE_WRITE;
3943
Omer Peleg2aac6302016-04-20 11:33:57 +03003944 start_vpfn = mm_to_dma_pfn(iova_pfn);
David Woodhousee1605492009-06-29 11:17:38 +01003945
Fenghua Yuf5329592009-08-04 15:09:37 -07003946 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003947 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003948 dma_pte_free_pagetable(domain, start_vpfn,
3949 start_vpfn + size - 1);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003950 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
David Woodhousee1605492009-06-29 11:17:38 +01003951 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003952 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003953
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003954 /* it's a non-present to present mapping. Only flush if caching mode */
3955 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003956 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003957 else
Weidong Han8c11e792008-12-08 15:29:22 +08003958 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003959
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003960 return nelems;
3961}
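/*
 * intel_map_sg() reserves one contiguous IOVA range sized for the whole
 * scatterlist and maps it in a single pass.  DMA_PTE_READ is set for
 * to-device and bidirectional transfers, and also for write-only mappings
 * when the IOMMU cannot handle zero-length reads (cap_zlr).  On success
 * nothing beyond the write buffer is flushed on real hardware; only in
 * caching mode must the not-present-to-present transition be invalidated.
 */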
3962
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003963static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3964{
3965 return !dma_addr;
3966}
3967
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003968struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003969 .alloc = intel_alloc_coherent,
3970 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003971 .map_sg = intel_map_sg,
3972 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003973 .map_page = intel_map_page,
3974 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003975 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003976};
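/*
 * intel_dma_ops is installed as the global dma_ops in intel_iommu_init(), so
 * ordinary drivers never call these functions directly; the generic DMA API
 * dispatches to them.  An illustrative streaming mapping (hypothetical driver
 * code, not part of this file):
 *
 *	dma_addr_t h = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, h))
 *		return -EIO;
 *	... device performs DMA ...
 *	dma_unmap_page(dev, h, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * ends up in intel_map_page() and intel_unmap_page().
 */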
3977
3978static inline int iommu_domain_cache_init(void)
3979{
3980 int ret = 0;
3981
3982 iommu_domain_cache = kmem_cache_create("iommu_domain",
3983 sizeof(struct dmar_domain),
3984 0,
3985 SLAB_HWCACHE_ALIGN,
3987 					 NULL);
3988 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003989 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003990 ret = -ENOMEM;
3991 }
3992
3993 return ret;
3994}
3995
3996static inline int iommu_devinfo_cache_init(void)
3997{
3998 int ret = 0;
3999
4000 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
4001 sizeof(struct device_domain_info),
4002 0,
4003 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004004 NULL);
4005 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004006 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004007 ret = -ENOMEM;
4008 }
4009
4010 return ret;
4011}
4012
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004013static int __init iommu_init_mempool(void)
4014{
4015 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004016 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004017 if (ret)
4018 return ret;
4019
4020 ret = iommu_domain_cache_init();
4021 if (ret)
4022 goto domain_error;
4023
4024 ret = iommu_devinfo_cache_init();
4025 if (!ret)
4026 return ret;
4027
4028 kmem_cache_destroy(iommu_domain_cache);
4029domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004030 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004031
4032 return -ENOMEM;
4033}
4034
4035static void __init iommu_exit_mempool(void)
4036{
4037 kmem_cache_destroy(iommu_devinfo_cache);
4038 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004039 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004040}
4041
Dan Williams556ab452010-07-23 15:47:56 -07004042static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4043{
4044 struct dmar_drhd_unit *drhd;
4045 u32 vtbar;
4046 int rc;
4047
4048 /* We know that this device on this chipset has its own IOMMU.
4049 * If we find it under a different IOMMU, then the BIOS is lying
4050 * to us. Hope that the IOMMU for this device is actually
4051 * disabled, and it needs no translation...
4052 */
4053 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4054 if (rc) {
4055 /* "can't" happen */
4056 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4057 return;
4058 }
4059 vtbar &= 0xffff0000;
4060
4061 	/* we know that this iommu should be at offset 0xa000 from vtbar */
4062 drhd = dmar_find_matched_drhd_unit(pdev);
4063 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4064 TAINT_FIRMWARE_WORKAROUND,
4065 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4066 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4067}
4068DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4069
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004070static void __init init_no_remapping_devices(void)
4071{
4072 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00004073 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08004074 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004075
4076 for_each_drhd_unit(drhd) {
4077 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08004078 for_each_active_dev_scope(drhd->devices,
4079 drhd->devices_cnt, i, dev)
4080 break;
David Woodhouse832bd852014-03-07 15:08:36 +00004081 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004082 if (i == drhd->devices_cnt)
4083 drhd->ignored = 1;
4084 }
4085 }
4086
Jiang Liu7c919772014-01-06 14:18:18 +08004087 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08004088 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004089 continue;
4090
Jiang Liub683b232014-02-19 14:07:32 +08004091 for_each_active_dev_scope(drhd->devices,
4092 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004093 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004094 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004095 if (i < drhd->devices_cnt)
4096 continue;
4097
David Woodhousec0771df2011-10-14 20:59:46 +01004098 /* This IOMMU has *only* gfx devices. Either bypass it or
4099 set the gfx_mapped flag, as appropriate */
4100 if (dmar_map_gfx) {
4101 intel_iommu_gfx_mapped = 1;
4102 } else {
4103 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08004104 for_each_active_dev_scope(drhd->devices,
4105 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004106 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004107 }
4108 }
4109}
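/*
 * Net effect of init_no_remapping_devices(): DRHD units that cover no devices
 * at all are ignored outright, and units that cover only graphics devices are
 * either kept (setting intel_iommu_gfx_mapped) or, when gfx mapping is
 * disabled (dmar_map_gfx == 0, typically via intel_iommu=igfx_off), ignored
 * with their devices marked DUMMY_DEVICE_DOMAIN_INFO so translation is
 * bypassed for them.
 */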
4110
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004111#ifdef CONFIG_SUSPEND
4112static int init_iommu_hw(void)
4113{
4114 struct dmar_drhd_unit *drhd;
4115 struct intel_iommu *iommu = NULL;
4116
4117 for_each_active_iommu(iommu, drhd)
4118 if (iommu->qi)
4119 dmar_reenable_qi(iommu);
4120
Joseph Cihulab7792602011-05-03 00:08:37 -07004121 for_each_iommu(iommu, drhd) {
4122 if (drhd->ignored) {
4123 /*
4124 * we always have to disable PMRs or DMA may fail on
4125 * this device
4126 */
4127 if (force_on)
4128 iommu_disable_protect_mem_regions(iommu);
4129 continue;
4130 }
4131
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004132 iommu_flush_write_buffer(iommu);
4133
4134 iommu_set_root_entry(iommu);
4135
4136 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004137 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08004138 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4139 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07004140 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004141 }
4142
4143 return 0;
4144}
4145
4146static void iommu_flush_all(void)
4147{
4148 struct dmar_drhd_unit *drhd;
4149 struct intel_iommu *iommu;
4150
4151 for_each_active_iommu(iommu, drhd) {
4152 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004153 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004154 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004155 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004156 }
4157}
4158
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004159static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004160{
4161 struct dmar_drhd_unit *drhd;
4162 struct intel_iommu *iommu = NULL;
4163 unsigned long flag;
4164
4165 for_each_active_iommu(iommu, drhd) {
4166 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4167 GFP_ATOMIC);
4168 if (!iommu->iommu_state)
4169 goto nomem;
4170 }
4171
4172 iommu_flush_all();
4173
4174 for_each_active_iommu(iommu, drhd) {
4175 iommu_disable_translation(iommu);
4176
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004177 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004178
4179 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4180 readl(iommu->reg + DMAR_FECTL_REG);
4181 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4182 readl(iommu->reg + DMAR_FEDATA_REG);
4183 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4184 readl(iommu->reg + DMAR_FEADDR_REG);
4185 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4186 readl(iommu->reg + DMAR_FEUADDR_REG);
4187
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004188 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004189 }
4190 return 0;
4191
4192nomem:
4193 for_each_active_iommu(iommu, drhd)
4194 kfree(iommu->iommu_state);
4195
4196 return -ENOMEM;
4197}
4198
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004199static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004200{
4201 struct dmar_drhd_unit *drhd;
4202 struct intel_iommu *iommu = NULL;
4203 unsigned long flag;
4204
4205 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004206 if (force_on)
4207 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4208 else
4209 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004210 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004211 }
4212
4213 for_each_active_iommu(iommu, drhd) {
4214
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004215 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004216
4217 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4218 iommu->reg + DMAR_FECTL_REG);
4219 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4220 iommu->reg + DMAR_FEDATA_REG);
4221 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4222 iommu->reg + DMAR_FEADDR_REG);
4223 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4224 iommu->reg + DMAR_FEUADDR_REG);
4225
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004226 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004227 }
4228
4229 for_each_active_iommu(iommu, drhd)
4230 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004231}
4232
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004233static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004234 .resume = iommu_resume,
4235 .suspend = iommu_suspend,
4236};
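/*
 * Across suspend/resume only the fault-reporting registers (FECTL, FEDATA,
 * FEADDR, FEUADDR) are saved and restored here; the root table, context
 * entries and translation-enable state are rebuilt from scratch by
 * init_iommu_hw(), which replays iommu_set_root_entry(), the global
 * context/IOTLB flushes and iommu_enable_translation() on every active IOMMU.
 */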
4237
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004238static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004239{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004240 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004241}
4242
4243#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004244static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004245#endif /* CONFIG_PM */
4246
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004247
Jiang Liuc2a0b532014-11-09 22:47:56 +08004248int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004249{
4250 struct acpi_dmar_reserved_memory *rmrr;
Eric Auger0659b8d2017-01-19 20:57:53 +00004251 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004252 struct dmar_rmrr_unit *rmrru;
Eric Auger0659b8d2017-01-19 20:57:53 +00004253 size_t length;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004254
4255 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4256 if (!rmrru)
Eric Auger0659b8d2017-01-19 20:57:53 +00004257 goto out;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004258
4259 rmrru->hdr = header;
4260 rmrr = (struct acpi_dmar_reserved_memory *)header;
4261 rmrru->base_address = rmrr->base_address;
4262 rmrru->end_address = rmrr->end_address;
Eric Auger0659b8d2017-01-19 20:57:53 +00004263
4264 length = rmrr->end_address - rmrr->base_address + 1;
4265 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4266 IOMMU_RESV_DIRECT);
4267 if (!rmrru->resv)
4268 goto free_rmrru;
4269
Jiang Liu2e455282014-02-19 14:07:36 +08004270 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4271 ((void *)rmrr) + rmrr->header.length,
4272 &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004273 if (rmrru->devices_cnt && rmrru->devices == NULL)
4274 goto free_all;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004275
Jiang Liu2e455282014-02-19 14:07:36 +08004276 list_add(&rmrru->list, &dmar_rmrr_units);
4277
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004278 return 0;
Eric Auger0659b8d2017-01-19 20:57:53 +00004279free_all:
4280 kfree(rmrru->resv);
4281free_rmrru:
4282 kfree(rmrru);
4283out:
4284 return -ENOMEM;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004285}
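/*
 * Each ACPI RMRR entry parsed above is also exported as an IOMMU_RESV_DIRECT
 * reserved region (rmrru->resv), so the generic IOMMU core can report that
 * this address range must stay identity-mapped for the devices in the RMRR's
 * scope (typical examples are USB legacy keyboard emulation buffers and IGD
 * stolen memory).
 */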
4286
Jiang Liu6b197242014-11-09 22:47:58 +08004287static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4288{
4289 struct dmar_atsr_unit *atsru;
4290 struct acpi_dmar_atsr *tmp;
4291
4292 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4293 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4294 if (atsr->segment != tmp->segment)
4295 continue;
4296 if (atsr->header.length != tmp->header.length)
4297 continue;
4298 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4299 return atsru;
4300 }
4301
4302 return NULL;
4303}
4304
4305int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004306{
4307 struct acpi_dmar_atsr *atsr;
4308 struct dmar_atsr_unit *atsru;
4309
Jiang Liu6b197242014-11-09 22:47:58 +08004310 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4311 return 0;
4312
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004313 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004314 atsru = dmar_find_atsr(atsr);
4315 if (atsru)
4316 return 0;
4317
4318 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004319 if (!atsru)
4320 return -ENOMEM;
4321
Jiang Liu6b197242014-11-09 22:47:58 +08004322 /*
4323 * If memory is allocated from slab by ACPI _DSM method, we need to
4324 * copy the memory content because the memory buffer will be freed
4325 * on return.
4326 */
4327 atsru->hdr = (void *)(atsru + 1);
4328 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004329 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004330 if (!atsru->include_all) {
4331 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4332 (void *)atsr + atsr->header.length,
4333 &atsru->devices_cnt);
4334 if (atsru->devices_cnt && atsru->devices == NULL) {
4335 kfree(atsru);
4336 return -ENOMEM;
4337 }
4338 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004339
Jiang Liu0e242612014-02-19 14:07:34 +08004340 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004341
4342 return 0;
4343}
4344
Jiang Liu9bdc5312014-01-06 14:18:27 +08004345static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4346{
4347 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4348 kfree(atsru);
4349}
4350
Jiang Liu6b197242014-11-09 22:47:58 +08004351int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4352{
4353 struct acpi_dmar_atsr *atsr;
4354 struct dmar_atsr_unit *atsru;
4355
4356 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4357 atsru = dmar_find_atsr(atsr);
4358 if (atsru) {
4359 list_del_rcu(&atsru->list);
4360 synchronize_rcu();
4361 intel_iommu_free_atsr(atsru);
4362 }
4363
4364 return 0;
4365}
4366
4367int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4368{
4369 int i;
4370 struct device *dev;
4371 struct acpi_dmar_atsr *atsr;
4372 struct dmar_atsr_unit *atsru;
4373
4374 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4375 atsru = dmar_find_atsr(atsr);
4376 if (!atsru)
4377 return 0;
4378
Linus Torvalds194dc872016-07-27 20:03:31 -07004379 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08004380 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4381 i, dev)
4382 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07004383 }
Jiang Liu6b197242014-11-09 22:47:58 +08004384
4385 return 0;
4386}
4387
Jiang Liuffebeb42014-11-09 22:48:02 +08004388static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4389{
4390 int sp, ret = 0;
4391 struct intel_iommu *iommu = dmaru->iommu;
4392
4393 if (g_iommus[iommu->seq_id])
4394 return 0;
4395
4396 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004397 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004398 iommu->name);
4399 return -ENXIO;
4400 }
4401 if (!ecap_sc_support(iommu->ecap) &&
4402 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004403 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004404 iommu->name);
4405 return -ENXIO;
4406 }
4407 sp = domain_update_iommu_superpage(iommu) - 1;
4408 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004409 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004410 iommu->name);
4411 return -ENXIO;
4412 }
4413
4414 /*
4415 * Disable translation if already enabled prior to OS handover.
4416 */
4417 if (iommu->gcmd & DMA_GCMD_TE)
4418 iommu_disable_translation(iommu);
4419
4420 g_iommus[iommu->seq_id] = iommu;
4421 ret = iommu_init_domains(iommu);
4422 if (ret == 0)
4423 ret = iommu_alloc_root_entry(iommu);
4424 if (ret)
4425 goto out;
4426
David Woodhouse8a94ade2015-03-24 14:54:56 +00004427#ifdef CONFIG_INTEL_IOMMU_SVM
4428 if (pasid_enabled(iommu))
4429 intel_svm_alloc_pasid_tables(iommu);
4430#endif
4431
Jiang Liuffebeb42014-11-09 22:48:02 +08004432 if (dmaru->ignored) {
4433 /*
4434 * we always have to disable PMRs or DMA may fail on this device
4435 */
4436 if (force_on)
4437 iommu_disable_protect_mem_regions(iommu);
4438 return 0;
4439 }
4440
4441 intel_iommu_init_qi(iommu);
4442 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004443
4444#ifdef CONFIG_INTEL_IOMMU_SVM
4445 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4446 ret = intel_svm_enable_prq(iommu);
4447 if (ret)
4448 goto disable_iommu;
4449 }
4450#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004451 ret = dmar_set_interrupt(iommu);
4452 if (ret)
4453 goto disable_iommu;
4454
4455 iommu_set_root_entry(iommu);
4456 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4457 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4458 iommu_enable_translation(iommu);
4459
Jiang Liuffebeb42014-11-09 22:48:02 +08004460 iommu_disable_protect_mem_regions(iommu);
4461 return 0;
4462
4463disable_iommu:
4464 disable_dmar_iommu(iommu);
4465out:
4466 free_dmar_iommu(iommu);
4467 return ret;
4468}
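/*
 * Bring-up order for a hot-added DMAR unit, as implemented above: verify the
 * new IOMMU does not weaken global capabilities (pass-through, snooping,
 * superpages), allocate its domain-ID bitmap and root entry, then enable QI,
 * interrupts and translation, and finally drop the protected-memory regions.
 * Any failure unwinds through disable_dmar_iommu()/free_dmar_iommu().
 */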
4469
Jiang Liu6b197242014-11-09 22:47:58 +08004470int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4471{
Jiang Liuffebeb42014-11-09 22:48:02 +08004472 int ret = 0;
4473 struct intel_iommu *iommu = dmaru->iommu;
4474
4475 if (!intel_iommu_enabled)
4476 return 0;
4477 if (iommu == NULL)
4478 return -EINVAL;
4479
4480 if (insert) {
4481 ret = intel_iommu_add(dmaru);
4482 } else {
4483 disable_dmar_iommu(iommu);
4484 free_dmar_iommu(iommu);
4485 }
4486
4487 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004488}
4489
Jiang Liu9bdc5312014-01-06 14:18:27 +08004490static void intel_iommu_free_dmars(void)
4491{
4492 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4493 struct dmar_atsr_unit *atsru, *atsr_n;
4494
4495 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4496 list_del(&rmrru->list);
4497 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004498 kfree(rmrru->resv);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004499 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004500 }
4501
Jiang Liu9bdc5312014-01-06 14:18:27 +08004502 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4503 list_del(&atsru->list);
4504 intel_iommu_free_atsr(atsru);
4505 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004506}
4507
4508int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4509{
Jiang Liub683b232014-02-19 14:07:32 +08004510 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004511 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004512 struct pci_dev *bridge = NULL;
4513 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004514 struct acpi_dmar_atsr *atsr;
4515 struct dmar_atsr_unit *atsru;
4516
4517 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004518 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004519 bridge = bus->self;
David Woodhoused14053b32015-10-15 09:28:06 +01004520 /* If it's an integrated device, allow ATS */
4521 if (!bridge)
4522 return 1;
4523 /* Connected via non-PCIe: no ATS */
4524 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004525 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004526 return 0;
David Woodhoused14053b32015-10-15 09:28:06 +01004527 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004528 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004529 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004530 }
4531
Jiang Liu0e242612014-02-19 14:07:34 +08004532 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004533 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4534 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4535 if (atsr->segment != pci_domain_nr(dev->bus))
4536 continue;
4537
Jiang Liub683b232014-02-19 14:07:32 +08004538 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004539 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004540 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004541
4542 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004543 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004544 }
Jiang Liub683b232014-02-19 14:07:32 +08004545 ret = 0;
4546out:
Jiang Liu0e242612014-02-19 14:07:34 +08004547 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004548
Jiang Liub683b232014-02-19 14:07:32 +08004549 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004550}
4551
Jiang Liu59ce0512014-02-19 14:07:35 +08004552int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4553{
4554 int ret = 0;
4555 struct dmar_rmrr_unit *rmrru;
4556 struct dmar_atsr_unit *atsru;
4557 struct acpi_dmar_atsr *atsr;
4558 struct acpi_dmar_reserved_memory *rmrr;
4559
4560 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4561 return 0;
4562
4563 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4564 rmrr = container_of(rmrru->hdr,
4565 struct acpi_dmar_reserved_memory, header);
4566 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4567 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4568 ((void *)rmrr) + rmrr->header.length,
4569 rmrr->segment, rmrru->devices,
4570 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004571 			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004572 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004573 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004574 dmar_remove_dev_scope(info, rmrr->segment,
4575 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004576 }
4577 }
4578
4579 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4580 if (atsru->include_all)
4581 continue;
4582
4583 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4584 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4585 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4586 (void *)atsr + atsr->header.length,
4587 atsr->segment, atsru->devices,
4588 atsru->devices_cnt);
4589 if (ret > 0)
4590 break;
4591 			else if (ret < 0)
4592 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004593 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004594 if (dmar_remove_dev_scope(info, atsr->segment,
4595 atsru->devices, atsru->devices_cnt))
4596 break;
4597 }
4598 }
4599
4600 return 0;
4601}
4602
Fenghua Yu99dcade2009-11-11 07:23:06 -08004603/*
4604 * Here we only respond to a device being unbound from its driver.
4605 *
4606 * A newly added device is not attached to its DMAR domain here yet. That
4607 * happens when the device is first mapped to an iova.
4608 */
4609static int device_notifier(struct notifier_block *nb,
4610 unsigned long action, void *data)
4611{
4612 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004613 struct dmar_domain *domain;
4614
David Woodhouse3d891942014-03-06 15:59:26 +00004615 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004616 return 0;
4617
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004618 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004619 return 0;
4620
David Woodhouse1525a292014-03-06 16:19:30 +00004621 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004622 if (!domain)
4623 return 0;
4624
Joerg Roedele6de0f82015-07-22 16:30:36 +02004625 dmar_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004626 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004627 domain_exit(domain);
Alex Williamsona97590e2011-03-04 14:52:16 -07004628
Fenghua Yu99dcade2009-11-11 07:23:06 -08004629 return 0;
4630}
4631
4632static struct notifier_block device_nb = {
4633 .notifier_call = device_notifier,
4634};
4635
Jiang Liu75f05562014-02-19 14:07:37 +08004636static int intel_iommu_memory_notifier(struct notifier_block *nb,
4637 unsigned long val, void *v)
4638{
4639 struct memory_notify *mhp = v;
4640 unsigned long long start, end;
4641 unsigned long start_vpfn, last_vpfn;
4642
4643 switch (val) {
4644 case MEM_GOING_ONLINE:
4645 start = mhp->start_pfn << PAGE_SHIFT;
4646 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4647 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004648 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004649 start, end);
4650 return NOTIFY_BAD;
4651 }
4652 break;
4653
4654 case MEM_OFFLINE:
4655 case MEM_CANCEL_ONLINE:
4656 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4657 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4658 while (start_vpfn <= last_vpfn) {
4659 struct iova *iova;
4660 struct dmar_drhd_unit *drhd;
4661 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004662 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004663
4664 iova = find_iova(&si_domain->iovad, start_vpfn);
4665 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004666 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004667 start_vpfn);
4668 break;
4669 }
4670
4671 iova = split_and_remove_iova(&si_domain->iovad, iova,
4672 start_vpfn, last_vpfn);
4673 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004674 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004675 start_vpfn, last_vpfn);
4676 return NOTIFY_BAD;
4677 }
4678
David Woodhouseea8ea462014-03-05 17:09:32 +00004679 freelist = domain_unmap(si_domain, iova->pfn_lo,
4680 iova->pfn_hi);
4681
Jiang Liu75f05562014-02-19 14:07:37 +08004682 rcu_read_lock();
4683 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004684 iommu_flush_iotlb_psi(iommu, si_domain,
Jiang Liua156ef92014-07-11 14:19:36 +08004685 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004686 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004687 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004688 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004689
4690 start_vpfn = iova->pfn_hi + 1;
4691 free_iova_mem(iova);
4692 }
4693 break;
4694 }
4695
4696 return NOTIFY_OK;
4697}
4698
4699static struct notifier_block intel_iommu_memory_nb = {
4700 .notifier_call = intel_iommu_memory_notifier,
4701 .priority = 0
4702};
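/*
 * The memory-hotplug notifier only matters when hardware pass-through is not
 * in use and si_domain (the static identity domain) exists: newly onlined RAM
 * must be added to the identity map before it can be a DMA target, and
 * offlined ranges must be unmapped, flushed on every IOMMU and have the
 * corresponding IOVA carved back out of si_domain's allocator.
 */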
4703
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004704static void free_all_cpu_cached_iovas(unsigned int cpu)
4705{
4706 int i;
4707
4708 for (i = 0; i < g_num_of_iommus; i++) {
4709 struct intel_iommu *iommu = g_iommus[i];
4710 struct dmar_domain *domain;
Aaron Campbell0caa7612016-07-02 21:23:24 -03004711 int did;
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004712
4713 if (!iommu)
4714 continue;
4715
Jan Niehusmann3bd4f912016-06-06 14:20:11 +02004716 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
Aaron Campbell0caa7612016-07-02 21:23:24 -03004717 domain = get_iommu_domain(iommu, (u16)did);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004718
4719 if (!domain)
4720 continue;
4721 free_cpu_cached_iovas(cpu, &domain->iovad);
4722 }
4723 }
4724}
4725
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004726static int intel_iommu_cpu_dead(unsigned int cpu)
Omer Pelegaa473242016-04-20 11:33:02 +03004727{
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004728 free_all_cpu_cached_iovas(cpu);
4729 flush_unmaps_timeout(cpu);
4730 return 0;
Omer Pelegaa473242016-04-20 11:33:02 +03004731}
4732
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004733static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4734{
4735 return container_of(dev, struct intel_iommu, iommu.dev);
4736}
4737
Alex Williamsona5459cf2014-06-12 16:12:31 -06004738static ssize_t intel_iommu_show_version(struct device *dev,
4739 struct device_attribute *attr,
4740 char *buf)
4741{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004742 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004743 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4744 return sprintf(buf, "%d:%d\n",
4745 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4746}
4747static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4748
4749static ssize_t intel_iommu_show_address(struct device *dev,
4750 struct device_attribute *attr,
4751 char *buf)
4752{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004753 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004754 return sprintf(buf, "%llx\n", iommu->reg_phys);
4755}
4756static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4757
4758static ssize_t intel_iommu_show_cap(struct device *dev,
4759 struct device_attribute *attr,
4760 char *buf)
4761{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004762 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004763 return sprintf(buf, "%llx\n", iommu->cap);
4764}
4765static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4766
4767static ssize_t intel_iommu_show_ecap(struct device *dev,
4768 struct device_attribute *attr,
4769 char *buf)
4770{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004771 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004772 return sprintf(buf, "%llx\n", iommu->ecap);
4773}
4774static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4775
Alex Williamson2238c082015-07-14 15:24:53 -06004776static ssize_t intel_iommu_show_ndoms(struct device *dev,
4777 struct device_attribute *attr,
4778 char *buf)
4779{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004780 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004781 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4782}
4783static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4784
4785static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4786 struct device_attribute *attr,
4787 char *buf)
4788{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004789 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004790 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4791 cap_ndoms(iommu->cap)));
4792}
4793static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4794
Alex Williamsona5459cf2014-06-12 16:12:31 -06004795static struct attribute *intel_iommu_attrs[] = {
4796 &dev_attr_version.attr,
4797 &dev_attr_address.attr,
4798 &dev_attr_cap.attr,
4799 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004800 &dev_attr_domains_supported.attr,
4801 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004802 NULL,
4803};
4804
4805static struct attribute_group intel_iommu_group = {
4806 .name = "intel-iommu",
4807 .attrs = intel_iommu_attrs,
4808};
4809
4810const struct attribute_group *intel_iommu_groups[] = {
4811 &intel_iommu_group,
4812 NULL,
4813};
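/*
 * These attribute groups are attached to the per-IOMMU sysfs devices created
 * in intel_iommu_init() via iommu_device_sysfs_add().  On a typical system
 * they surface read-only hardware information along the lines of
 *
 *	# cat /sys/class/iommu/dmar0/intel-iommu/cap
 *	# cat /sys/class/iommu/dmar0/intel-iommu/domains_used
 *
 * (paths shown for illustration; the device name matches iommu->name).
 */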
4814
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004815int __init intel_iommu_init(void)
4816{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004817 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004818 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004819 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004820
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004821 /* VT-d is required for a TXT/tboot launch, so enforce that */
4822 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004823
Jiang Liu3a5670e2014-02-19 14:07:33 +08004824 if (iommu_init_mempool()) {
4825 if (force_on)
4826 panic("tboot: Failed to initialize iommu memory\n");
4827 return -ENOMEM;
4828 }
4829
4830 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004831 if (dmar_table_init()) {
4832 if (force_on)
4833 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004834 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004835 }
4836
Suresh Siddhac2c72862011-08-23 17:05:19 -07004837 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004838 if (force_on)
4839 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004840 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004841 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004842
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004843 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004844 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004845
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004846 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004847 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004848
4849 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004850 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004851
Joseph Cihula51a63e62011-03-21 11:04:24 -07004852 if (dmar_init_reserved_ranges()) {
4853 if (force_on)
4854 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004855 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004856 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004857
4858 init_no_remapping_devices();
4859
Joseph Cihulab7792602011-05-03 00:08:37 -07004860 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004861 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004862 if (force_on)
4863 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004864 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004865 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004866 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004867 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004868 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004869
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004870#ifdef CONFIG_SWIOTLB
4871 swiotlb = 0;
4872#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004873 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004874
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004875 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004876
Joerg Roedel39ab9552017-02-01 16:56:46 +01004877 for_each_active_iommu(iommu, drhd) {
4878 iommu_device_sysfs_add(&iommu->iommu, NULL,
4879 intel_iommu_groups,
4880 "%s", iommu->name);
4881 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4882 iommu_device_register(&iommu->iommu);
4883 }
Alex Williamsona5459cf2014-06-12 16:12:31 -06004884
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004885 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004886 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004887 if (si_domain && !hw_pass_through)
4888 register_memory_notifier(&intel_iommu_memory_nb);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004889 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4890 intel_iommu_cpu_dead);
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004891 intel_iommu_enabled = 1;
4892
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004893 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004894
4895out_free_reserved_range:
4896 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004897out_free_dmar:
4898 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004899 up_write(&dmar_global_lock);
4900 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004901 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004902}
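/*
 * Rough call order in intel_iommu_init(): parse the DMAR table and device
 * scopes, bail out if VT-d is disabled, reserve IOVA ranges, decide which
 * DRHDs to ignore, program the hardware in init_dmars(), switch dma_ops to
 * intel_dma_ops (disabling swiotlb), and only then register the sysfs
 * entries, the IOMMU API ops, the bus/memory/cpuhp notifiers and set
 * intel_iommu_enabled.  A tboot launch forces VT-d on, turning most early
 * failures into panics.
 */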
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004903
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004904static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
Alex Williamson579305f2014-07-03 09:51:43 -06004905{
4906 struct intel_iommu *iommu = opaque;
4907
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004908 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06004909 return 0;
4910}
4911
4912/*
4913 * NB - intel-iommu lacks any sort of reference counting for the users of
4914 * dependent devices. If multiple endpoints have intersecting dependent
4915 * devices, unbinding the driver from any one of them will possibly leave
4916 * the others unable to operate.
4917 */
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004918static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004919{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004920 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004921 return;
4922
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004923 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004924}
4925
Joerg Roedel127c7612015-07-23 17:44:46 +02004926static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004927{
Weidong Hanc7151a82008-12-08 22:51:37 +08004928 struct intel_iommu *iommu;
4929 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08004930
Joerg Roedel55d94042015-07-22 16:50:40 +02004931 assert_spin_locked(&device_domain_lock);
4932
Joerg Roedelb608ac32015-07-21 18:19:08 +02004933 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08004934 return;
4935
Joerg Roedel127c7612015-07-23 17:44:46 +02004936 iommu = info->iommu;
4937
4938 if (info->dev) {
4939 iommu_disable_dev_iotlb(info);
4940 domain_context_clear(iommu, info->dev);
4941 }
4942
Joerg Roedelb608ac32015-07-21 18:19:08 +02004943 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07004944
Joerg Roedeld160aca2015-07-22 11:52:53 +02004945 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004946 domain_detach_iommu(info->domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004947 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004948
4949 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004950}
4951
Joerg Roedel55d94042015-07-22 16:50:40 +02004952static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4953 struct device *dev)
4954{
Joerg Roedel127c7612015-07-23 17:44:46 +02004955 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02004956 unsigned long flags;
4957
Weidong Hanc7151a82008-12-08 22:51:37 +08004958 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004959 info = dev->archdata.iommu;
4960 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004961 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004962}
4963
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

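/*
 * iommu_ops->domain_alloc callback. Only IOMMU_DOMAIN_UNMANAGED domains
 * are supported; the domain is set up with DEFAULT_DOMAIN_ADDRESS_WIDTH
 * and its aperture is sized from the resulting guest address width.
 */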
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

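/*
 * Attach a device to an API-created domain: refuse devices locked down
 * by an RMRR, tear down any existing binding, clamp the domain's address
 * width to what this IOMMU can address (cap_mgaw), drop page-table
 * levels the IOMMU cannot walk, then add the device to the domain.
 */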
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			rcu_read_lock();
			dmar_remove_one_dev_info(old_domain, dev);
			rcu_read_unlock();

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}

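/*
 * Map [iova, iova + size) to hpa. IOMMU_READ/WRITE/CACHE are translated
 * into DMA_PTE_READ/WRITE/SNP (snooping only if the domain supports it),
 * the mapping is checked against the domain's address width, the size is
 * rounded up to whole VT-d pages and the PFN range is then installed.
 */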
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

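/*
 * Unmap at least [iova, iova + size); if the IOVA falls within a
 * large-page mapping, the whole superpage is unmapped. Freed page-table
 * pages are collected on a freelist and only released once the IOTLB of
 * every IOMMU backing the domain has been flushed for the range.
 */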
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_domain_iommu(iommu_id, dmar_domain) {
		iommu = g_iommus[iommu_id];

		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
				      start_pfn, npages, !freelist, 0);
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

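/*
 * Resolve an IOVA to the physical address recorded in its leaf PTE, or
 * return 0 if nothing is mapped there.
 */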
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

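/*
 * add_device/remove_device callbacks: link the device to its hardware
 * IOMMU and place it in (or remove it from) its IOMMU group.
 */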
static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(&iommu->iommu, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(&iommu->iommu, dev);
}

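/*
 * Report the reserved regions for a device: every RMRR range whose
 * device scope includes it, plus the IOAPIC range which is reserved for
 * all devices. put_resv_regions() only frees the region allocated here;
 * the RMRR entries remain owned by their dmar_rmrr_unit.
 */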
static void intel_iommu_get_resv_regions(struct device *device,
					 struct list_head *head)
{
	struct iommu_resv_region *reg;
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			if (i_dev != device)
				continue;

			list_add_tail(&rmrr->resv->list, head);
		}
	}
	rcu_read_unlock();

	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_RESERVED);
	if (!reg)
		return;
	list_add_tail(&reg->list, head);
}

static void intel_iommu_put_resv_regions(struct device *dev,
					 struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list) {
		if (entry->type == IOMMU_RESV_RESERVED)
			kfree(entry);
	}
}

#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
{
	/*
	 * Convert ecap_pss to the extended context entry "pts" encoding,
	 * while also respecting the soft pasid_max limit set for the iommu.
	 * - number of PASID bits = ecap_pss + 1
	 * - number of PASID table entries = 2^(pts + 5)
	 * Therefore, pts = ecap_pss - 4
	 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
	 */
	if (ecap_pss(iommu->ecap) < 5)
		return 0;

	/* pasid_max is encoded as the actual number of entries, not the bits */
	return find_first_bit((unsigned long *)&iommu->pasid_max,
			MAX_NR_PASID_BITS) - 5;
}

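/*
 * Enable PASID (SVM) support for one device: point the extended context
 * entry at the IOMMU's PASID and PASID-state tables, convert a
 * pass-through translation type into a PASID-capable one, set the
 * PASIDE/DINVE/PRS bits as appropriate, flush the context cache and
 * record the device's ATS invalidation queue depth for later use.
 */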
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
			intel_iommu_get_pts(iommu);

		wmb();
		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked, which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types, depending on whether ATS
		 * is available or not. Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	if (!iommu->pasid_table) {
		dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

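/* Callbacks exported to the generic IOMMU core for the VT-d driver. */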
const struct iommu_ops intel_iommu_ops = {
	.capable		= intel_iommu_capable,
	.domain_alloc		= intel_iommu_domain_alloc,
	.domain_free		= intel_iommu_domain_free,
	.attach_dev		= intel_iommu_attach_device,
	.detach_dev		= intel_iommu_detach_device,
	.map			= intel_iommu_map,
	.unmap			= intel_iommu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= intel_iommu_iova_to_phys,
	.add_device		= intel_iommu_add_device,
	.remove_device		= intel_iommu_remove_device,
	.get_resv_regions	= intel_iommu_get_resv_regions,
	.put_resv_regions	= intel_iommu_put_resv_regions,
	.device_group		= pci_device_group,
	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
};

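/*
 * Early PCI header fixups for chipsets with known VT-d problems: broken
 * graphics DMAR units, and parts that need write-buffer flushing despite
 * not advertising the RWBF capability.
 */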
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

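/*
 * GGC_MEMORY_* decode bits 11:8 of the GGC graphics control config
 * register (offset 0x52), read by quirk_calpella_no_shadow_gtt() below:
 * the graphics stolen-memory size and whether the BIOS allocated a
 * VT-enabled (shadow) GTT.
 */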
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}