blob: 44bb6fda4755fba4cffa363e5dbe90132608eddc [file] [log] [blame]
Thomas Gleixner2025cf92019-05-29 07:18:02 -07001// SPDX-License-Identifier: GPL-2.0-only
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002/*
David Woodhouseea8ea462014-03-05 17:09:32 +00003 * Copyright © 2006-2014 Intel Corporation.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004 *
David Woodhouseea8ea462014-03-05 17:09:32 +00005 * Authors: David Woodhouse <dwmw2@infradead.org>,
6 * Ashok Raj <ashok.raj@intel.com>,
7 * Shaohua Li <shaohua.li@intel.com>,
8 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
9 * Fenghua Yu <fenghua.yu@intel.com>
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020010 * Joerg Roedel <jroedel@suse.de>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070011 */
12
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020013#define pr_fmt(fmt) "DMAR: " fmt
Bjorn Helgaas932a6522019-02-08 16:06:00 -060014#define dev_fmt(fmt) pr_fmt(fmt)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020015
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070016#include <linux/init.h>
17#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080018#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040019#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070020#include <linux/slab.h>
21#include <linux/irq.h>
22#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070023#include <linux/spinlock.h>
24#include <linux/pci.h>
25#include <linux/dmar.h>
26#include <linux/dma-mapping.h>
27#include <linux/mempool.h>
Jiang Liu75f05562014-02-19 14:07:37 +080028#include <linux/memory.h>
Omer Pelegaa473242016-04-20 11:33:02 +030029#include <linux/cpu.h>
mark gross5e0d2a62008-03-04 15:22:08 -080030#include <linux/timer.h>
Dan Williamsdfddb962015-10-09 18:16:46 -040031#include <linux/io.h>
Kay, Allen M38717942008-09-09 18:37:29 +030032#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010033#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030034#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010035#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070036#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100037#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020038#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080039#include <linux/memblock.h>
Akinobu Mita36746432014-06-04 16:06:51 -070040#include <linux/dma-contiguous.h>
Christoph Hellwigfec777c2018-03-19 11:38:15 +010041#include <linux/dma-direct.h>
Joerg Roedel091d42e2015-06-12 11:56:10 +020042#include <linux/crash_dump.h>
Anshuman Khandual98fa15f2019-03-05 15:42:58 -080043#include <linux/numa.h>
Lu Baolucfb94a32019-09-06 14:14:52 +080044#include <linux/swiotlb.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070045#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090047#include <asm/iommu.h>
Lu Baolucfb94a32019-09-06 14:14:52 +080048#include <trace/events/intel_iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070049
Joerg Roedel672cf6d2020-06-09 15:03:03 +020050#include "../irq_remapping.h"
Lu Baolu02f3eff2020-07-24 09:49:25 +080051#include "pasid.h"
Joerg Roedel078e1ee2012-09-26 12:44:43 +020052
Fenghua Yu5b6985c2008-10-16 18:02:32 -070053#define ROOT_SIZE VTD_PAGE_SIZE
54#define CONTEXT_SIZE VTD_PAGE_SIZE
55
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070056#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
David Woodhouse18436af2015-03-25 15:05:47 +000057#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070058#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070059#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070060
61#define IOAPIC_RANGE_START (0xfee00000)
62#define IOAPIC_RANGE_END (0xfeefffff)
63#define IOVA_START_ADDR (0x1000)
64
Sohil Mehta5e3b4a12017-12-20 11:59:24 -080065#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070066
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070067#define MAX_AGAW_WIDTH 64
Jiang Liu5c645b32014-01-06 14:18:12 +080068#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070069
David Woodhouse2ebe3152009-09-19 07:34:04 -070070#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
72
73/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
76 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070078
Robin Murphy1b722502015-01-12 17:51:15 +000079/* IO virtual address start page frame number */
80#define IOVA_START_PFN (1)
81
Mark McLoughlinf27be032008-11-20 15:49:43 +000082#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
mark gross5e0d2a62008-03-04 15:22:08 -080083
Andrew Mortondf08cdc2010-09-22 13:05:11 -070084/* page table handling */
85#define LEVEL_STRIDE (9)
86#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
87
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020088/*
89 * This bitmap is used to advertise the page sizes our hardware support
90 * to the IOMMU core, which will then use this information to split
91 * physically contiguous memory regions it is mapping into page sizes
92 * that we support.
93 *
94 * Traditionally the IOMMU core just handed us the mappings directly,
95 * after making sure the size is an order of a 4KiB page and that the
96 * mapping has natural alignment.
97 *
98 * To retain this behavior, we currently advertise that we support
99 * all page sizes that are an order of 4KiB.
100 *
101 * If at some point we'd like to utilize the IOMMU core's new behavior,
102 * we could change this to advertise the real page sizes we support.
103 */
104#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
105
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700106static inline int agaw_to_level(int agaw)
107{
108 return agaw + 2;
109}
110
111static inline int agaw_to_width(int agaw)
112{
Jiang Liu5c645b32014-01-06 14:18:12 +0800113 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700114}
115
116static inline int width_to_agaw(int width)
117{
Jiang Liu5c645b32014-01-06 14:18:12 +0800118 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700119}
120
121static inline unsigned int level_to_offset_bits(int level)
122{
123 return (level - 1) * LEVEL_STRIDE;
124}
125
Chris Wilson29aaebb2020-08-22 17:02:09 +0100126static inline int pfn_level_offset(u64 pfn, int level)
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700127{
128 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
129}
130
Chris Wilson29aaebb2020-08-22 17:02:09 +0100131static inline u64 level_mask(int level)
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700132{
Chris Wilson29aaebb2020-08-22 17:02:09 +0100133 return -1ULL << level_to_offset_bits(level);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700134}
135
Chris Wilson29aaebb2020-08-22 17:02:09 +0100136static inline u64 level_size(int level)
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700137{
Chris Wilson29aaebb2020-08-22 17:02:09 +0100138 return 1ULL << level_to_offset_bits(level);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700139}
140
Chris Wilson29aaebb2020-08-22 17:02:09 +0100141static inline u64 align_to_level(u64 pfn, int level)
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700142{
143 return (pfn + level_size(level) - 1) & level_mask(level);
144}
David Woodhousefd18de52009-05-10 23:57:41 +0100145
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100146static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
147{
Chris Wilson29aaebb2020-08-22 17:02:09 +0100148 return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100149}
150
David Woodhousedd4e8312009-06-27 16:21:20 +0100151/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
152 are never going to work. */
153static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
154{
155 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
156}
157
158static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
159{
160 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
161}
162static inline unsigned long page_to_dma_pfn(struct page *pg)
163{
164 return mm_to_dma_pfn(page_to_pfn(pg));
165}
166static inline unsigned long virt_to_dma_pfn(void *p)
167{
168 return page_to_dma_pfn(virt_to_page(p));
169}
170
Weidong Hand9630fe2008-12-08 11:06:32 +0800171/* global iommu list, set NULL for ignored DMAR units */
172static struct intel_iommu **g_iommus;
173
David Woodhousee0fc7e02009-09-30 09:12:17 -0700174static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000175static int rwbf_quirk;
176
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000177/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700178 * set to 1 to panic kernel if can't successfully enable VT-d
179 * (used when kernel is launched w/ TXT)
180 */
181static int force_on = 0;
Shaohua Libfd20f12017-04-26 09:18:35 -0700182int intel_iommu_tboot_noforce;
Lu Baolu89a60792018-10-23 15:45:01 +0800183static int no_platform_optin;
Joseph Cihulab7792602011-05-03 00:08:37 -0700184
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000185#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000186
Joerg Roedel091d42e2015-06-12 11:56:10 +0200187/*
188 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
189 * if marked present.
190 */
191static phys_addr_t root_entry_lctp(struct root_entry *re)
192{
193 if (!(re->lo & 1))
194 return 0;
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000195
Joerg Roedel091d42e2015-06-12 11:56:10 +0200196 return re->lo & VTD_PAGE_MASK;
197}
198
199/*
200 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
201 * if marked present.
202 */
203static phys_addr_t root_entry_uctp(struct root_entry *re)
204{
205 if (!(re->hi & 1))
206 return 0;
207
208 return re->hi & VTD_PAGE_MASK;
209}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000210
Joerg Roedelcf484d02015-06-12 12:21:46 +0200211static inline void context_clear_pasid_enable(struct context_entry *context)
212{
213 context->lo &= ~(1ULL << 11);
214}
215
216static inline bool context_pasid_enabled(struct context_entry *context)
217{
218 return !!(context->lo & (1ULL << 11));
219}
220
221static inline void context_set_copied(struct context_entry *context)
222{
223 context->hi |= (1ull << 3);
224}
225
226static inline bool context_copied(struct context_entry *context)
227{
228 return !!(context->hi & (1ULL << 3));
229}
230
231static inline bool __context_present(struct context_entry *context)
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000232{
233 return (context->lo & 1);
234}
Joerg Roedelcf484d02015-06-12 12:21:46 +0200235
Sohil Mehta26b86092018-09-11 17:11:36 -0700236bool context_present(struct context_entry *context)
Joerg Roedelcf484d02015-06-12 12:21:46 +0200237{
238 return context_pasid_enabled(context) ?
239 __context_present(context) :
240 __context_present(context) && !context_copied(context);
241}
242
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000243static inline void context_set_present(struct context_entry *context)
244{
245 context->lo |= 1;
246}
247
248static inline void context_set_fault_enable(struct context_entry *context)
249{
250 context->lo &= (((u64)-1) << 2) | 1;
251}
252
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000253static inline void context_set_translation_type(struct context_entry *context,
254 unsigned long value)
255{
256 context->lo &= (((u64)-1) << 4) | 3;
257 context->lo |= (value & 3) << 2;
258}
259
260static inline void context_set_address_root(struct context_entry *context,
261 unsigned long value)
262{
Li, Zhen-Hua1a2262f2014-11-05 15:30:19 +0800263 context->lo &= ~VTD_PAGE_MASK;
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000264 context->lo |= value & VTD_PAGE_MASK;
265}
266
267static inline void context_set_address_width(struct context_entry *context,
268 unsigned long value)
269{
270 context->hi |= value & 7;
271}
272
273static inline void context_set_domain_id(struct context_entry *context,
274 unsigned long value)
275{
276 context->hi |= (value & ((1 << 16) - 1)) << 8;
277}
278
Joerg Roedeldbcd8612015-06-12 12:02:09 +0200279static inline int context_domain_id(struct context_entry *c)
280{
281 return((c->hi >> 8) & 0xffff);
282}
283
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000284static inline void context_clear_entry(struct context_entry *context)
285{
286 context->lo = 0;
287 context->hi = 0;
288}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000289
Mark McLoughlin622ba122008-11-20 15:49:46 +0000290/*
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700291 * This domain is a statically identity mapping domain.
292 * 1. This domain creats a static 1:1 mapping to all usable memory.
293 * 2. It maps to each iommu if successful.
294 * 3. Each iommu mapps to this domain if successful.
295 */
David Woodhouse19943b02009-08-04 16:19:20 +0100296static struct dmar_domain *si_domain;
297static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700298
Joerg Roedel29a27712015-07-21 17:17:12 +0200299#define for_each_domain_iommu(idx, domain) \
300 for (idx = 0; idx < g_num_of_iommus; idx++) \
301 if (domain->iommu_refcnt[idx])
302
Jiang Liub94e4112014-02-19 14:07:25 +0800303struct dmar_rmrr_unit {
304 struct list_head list; /* list of rmrr units */
305 struct acpi_dmar_header *hdr; /* ACPI header */
306 u64 base_address; /* reserved base address*/
307 u64 end_address; /* reserved end address */
David Woodhouse832bd852014-03-07 15:08:36 +0000308 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800309 int devices_cnt; /* target device count */
310};
311
312struct dmar_atsr_unit {
313 struct list_head list; /* list of ATSR units */
314 struct acpi_dmar_header *hdr; /* ACPI header */
David Woodhouse832bd852014-03-07 15:08:36 +0000315 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800316 int devices_cnt; /* target device count */
317 u8 include_all:1; /* include all ports */
318};
319
320static LIST_HEAD(dmar_atsr_units);
321static LIST_HEAD(dmar_rmrr_units);
322
323#define for_each_rmrr_units(rmrr) \
324 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
325
mark gross5e0d2a62008-03-04 15:22:08 -0800326/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800327static int g_num_of_iommus;
328
Jiang Liu92d03cc2014-02-19 14:07:28 +0800329static void domain_exit(struct dmar_domain *domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700330static void domain_remove_dev_info(struct dmar_domain *domain);
Bjorn Helgaas71753232019-02-08 16:06:15 -0600331static void dmar_remove_one_dev_info(struct device *dev);
Joerg Roedel127c7612015-07-23 17:44:46 +0200332static void __dmar_remove_one_dev_info(struct device_domain_info *info);
Lu Baolu8af46c72019-05-25 13:41:32 +0800333static int intel_iommu_attach_device(struct iommu_domain *domain,
334 struct device *dev);
Lu Baolucfb94a32019-09-06 14:14:52 +0800335static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
336 dma_addr_t iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700337
Suresh Siddhad3f13812011-08-23 17:05:25 -0700338#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800339int dmar_disabled = 0;
340#else
341int dmar_disabled = 1;
Lu Baolu04618252020-01-02 08:18:02 +0800342#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800343
Lu Baoluba61c3d2020-05-01 15:24:27 +0800344#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
Lu Baolu04618252020-01-02 08:18:02 +0800345int intel_iommu_sm = 1;
346#else
Sai Praneeth Prakhyacdd3a242019-05-24 16:40:16 -0700347int intel_iommu_sm;
Lu Baoluba61c3d2020-05-01 15:24:27 +0800348#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
Lu Baolu04618252020-01-02 08:18:02 +0800349
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200350int intel_iommu_enabled = 0;
351EXPORT_SYMBOL_GPL(intel_iommu_enabled);
352
David Woodhouse2d9e6672010-06-15 10:57:57 +0100353static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700354static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800355static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100356static int intel_iommu_superpage = 1;
David Woodhouseae853dd2015-09-09 11:58:59 +0100357static int iommu_identity_mapping;
Lu Baolue5e04d02019-09-06 14:14:49 +0800358static int intel_no_bounce;
Lu Baolub1012ca2020-07-23 09:34:37 +0800359static int iommu_skip_te_disable;
David Woodhousec83b2f22015-06-12 10:15:49 +0100360
David Woodhouseae853dd2015-09-09 11:58:59 +0100361#define IDENTMAP_GFX 2
362#define IDENTMAP_AZALIA 4
David Woodhousec83b2f22015-06-12 10:15:49 +0100363
David Woodhousec0771df2011-10-14 20:59:46 +0100364int intel_iommu_gfx_mapped;
365EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
366
Lu Baolu8af46c72019-05-25 13:41:32 +0800367#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
Lu Baolue85bb992020-05-16 14:20:52 +0800368struct device_domain_info *get_domain_info(struct device *dev)
369{
370 struct device_domain_info *info;
371
372 if (!dev)
373 return NULL;
374
Joerg Roedel01b9d4e2020-06-25 15:08:25 +0200375 info = dev_iommu_priv_get(dev);
Lu Baolu2d33b7d2020-09-03 14:51:32 +0800376 if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
Lu Baolue85bb992020-05-16 14:20:52 +0800377 return NULL;
378
379 return info;
380}
381
Lu Baolue2726da2020-01-02 08:18:22 +0800382DEFINE_SPINLOCK(device_domain_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700383static LIST_HEAD(device_domain_list);
384
Lu Baolue5e04d02019-09-06 14:14:49 +0800385#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \
386 to_pci_dev(d)->untrusted)
387
Lu Baolu85319dc2018-07-14 15:46:58 +0800388/*
389 * Iterate over elements in device_domain_list and call the specified
Lu Baolu0bbeb012018-12-10 09:58:56 +0800390 * callback @fn against each element.
Lu Baolu85319dc2018-07-14 15:46:58 +0800391 */
392int for_each_device_domain(int (*fn)(struct device_domain_info *info,
393 void *data), void *data)
394{
395 int ret = 0;
Lu Baolu0bbeb012018-12-10 09:58:56 +0800396 unsigned long flags;
Lu Baolu85319dc2018-07-14 15:46:58 +0800397 struct device_domain_info *info;
398
Lu Baolu0bbeb012018-12-10 09:58:56 +0800399 spin_lock_irqsave(&device_domain_lock, flags);
Lu Baolu85319dc2018-07-14 15:46:58 +0800400 list_for_each_entry(info, &device_domain_list, global) {
401 ret = fn(info, data);
Lu Baolu0bbeb012018-12-10 09:58:56 +0800402 if (ret) {
403 spin_unlock_irqrestore(&device_domain_lock, flags);
Lu Baolu85319dc2018-07-14 15:46:58 +0800404 return ret;
Lu Baolu0bbeb012018-12-10 09:58:56 +0800405 }
Lu Baolu85319dc2018-07-14 15:46:58 +0800406 }
Lu Baolu0bbeb012018-12-10 09:58:56 +0800407 spin_unlock_irqrestore(&device_domain_lock, flags);
Lu Baolu85319dc2018-07-14 15:46:58 +0800408
409 return 0;
410}
411
Joerg Roedelb0119e82017-02-01 13:23:08 +0100412const struct iommu_ops intel_iommu_ops;
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100413
Joerg Roedel4158c2e2015-06-12 10:14:02 +0200414static bool translation_pre_enabled(struct intel_iommu *iommu)
415{
416 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
417}
418
Joerg Roedel091d42e2015-06-12 11:56:10 +0200419static void clear_translation_pre_enabled(struct intel_iommu *iommu)
420{
421 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
422}
423
Joerg Roedel4158c2e2015-06-12 10:14:02 +0200424static void init_translation_status(struct intel_iommu *iommu)
425{
426 u32 gsts;
427
428 gsts = readl(iommu->reg + DMAR_GSTS_REG);
429 if (gsts & DMA_GSTS_TES)
430 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
431}
432
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700433static int __init intel_iommu_setup(char *str)
434{
435 if (!str)
436 return -EINVAL;
437 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800438 if (!strncmp(str, "on", 2)) {
439 dmar_disabled = 0;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200440 pr_info("IOMMU enabled\n");
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800441 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700442 dmar_disabled = 1;
Lu Baolu89a60792018-10-23 15:45:01 +0800443 no_platform_optin = 1;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200444 pr_info("IOMMU disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700445 } else if (!strncmp(str, "igfx_off", 8)) {
446 dmar_map_gfx = 0;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200447 pr_info("Disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700448 } else if (!strncmp(str, "forcedac", 8)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200449 pr_info("Forcing DAC for PCI devices\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700450 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800451 } else if (!strncmp(str, "strict", 6)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200452 pr_info("Disable batched IOTLB flush\n");
mark gross5e0d2a62008-03-04 15:22:08 -0800453 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100454 } else if (!strncmp(str, "sp_off", 6)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200455 pr_info("Disable supported super page\n");
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100456 intel_iommu_superpage = 0;
Lu Baolu8950dcd2019-01-24 10:31:32 +0800457 } else if (!strncmp(str, "sm_on", 5)) {
458 pr_info("Intel-IOMMU: scalable mode supported\n");
459 intel_iommu_sm = 1;
Shaohua Libfd20f12017-04-26 09:18:35 -0700460 } else if (!strncmp(str, "tboot_noforce", 13)) {
Andy Shevchenko86278922020-05-07 19:18:02 +0300461 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
Shaohua Libfd20f12017-04-26 09:18:35 -0700462 intel_iommu_tboot_noforce = 1;
Lu Baolue5e04d02019-09-06 14:14:49 +0800463 } else if (!strncmp(str, "nobounce", 8)) {
464 pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
465 intel_no_bounce = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700466 }
467
468 str += strcspn(str, ",");
469 while (*str == ',')
470 str++;
471 }
472 return 0;
473}
474__setup("intel_iommu=", intel_iommu_setup);
475
476static struct kmem_cache *iommu_domain_cache;
477static struct kmem_cache *iommu_devinfo_cache;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700478
Joerg Roedel9452d5b2015-07-21 10:00:56 +0200479static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
480{
Joerg Roedel8bf47812015-07-21 10:41:21 +0200481 struct dmar_domain **domains;
482 int idx = did >> 8;
483
484 domains = iommu->domains[idx];
485 if (!domains)
486 return NULL;
487
488 return domains[did & 0xff];
Joerg Roedel9452d5b2015-07-21 10:00:56 +0200489}
490
491static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
492 struct dmar_domain *domain)
493{
Joerg Roedel8bf47812015-07-21 10:41:21 +0200494 struct dmar_domain **domains;
495 int idx = did >> 8;
496
497 if (!iommu->domains[idx]) {
498 size_t size = 256 * sizeof(struct dmar_domain *);
499 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
500 }
501
502 domains = iommu->domains[idx];
503 if (WARN_ON(!domains))
504 return;
505 else
506 domains[did & 0xff] = domain;
Joerg Roedel9452d5b2015-07-21 10:00:56 +0200507}
508
Lu Baolu9ddbfb42018-07-14 15:46:57 +0800509void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700510{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700511 struct page *page;
512 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700513
Suresh Siddha4c923d42009-10-02 11:01:24 -0700514 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
515 if (page)
516 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700517 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700518}
519
Lu Baolu9ddbfb42018-07-14 15:46:57 +0800520void free_pgtable_page(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700521{
522 free_page((unsigned long)vaddr);
523}
524
525static inline void *alloc_domain_mem(void)
526{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900527 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700528}
529
Kay, Allen M38717942008-09-09 18:37:29 +0300530static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700531{
532 kmem_cache_free(iommu_domain_cache, vaddr);
533}
534
535static inline void * alloc_devinfo_mem(void)
536{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900537 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700538}
539
540static inline void free_devinfo_mem(void *vaddr)
541{
542 kmem_cache_free(iommu_devinfo_cache, vaddr);
543}
544
Joerg Roedel28ccce02015-07-21 14:45:31 +0200545static inline int domain_type_is_si(struct dmar_domain *domain)
546{
547 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
548}
549
Lu Baoluddf09b62020-01-02 08:18:17 +0800550static inline bool domain_use_first_level(struct dmar_domain *domain)
551{
552 return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
553}
554
Jiang Liu162d1b12014-07-11 14:19:35 +0800555static inline int domain_pfn_supported(struct dmar_domain *domain,
556 unsigned long pfn)
557{
558 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
559
560 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
561}
562
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700563static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800564{
565 unsigned long sagaw;
566 int agaw = -1;
567
568 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700569 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800570 agaw >= 0; agaw--) {
571 if (test_bit(agaw, &sagaw))
572 break;
573 }
574
575 return agaw;
576}
577
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700578/*
579 * Calculate max SAGAW for each iommu.
580 */
581int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
582{
583 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
584}
585
586/*
587 * calculate agaw for each iommu.
588 * "SAGAW" may be different across iommus, use a default agaw, and
589 * get a supported less agaw for iommus that don't support the default agaw.
590 */
591int iommu_calculate_agaw(struct intel_iommu *iommu)
592{
593 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
594}
595
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700596/* This functionin only returns single iommu in a domain */
Lu Baolu9ddbfb42018-07-14 15:46:57 +0800597struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
Weidong Han8c11e792008-12-08 15:29:22 +0800598{
599 int iommu_id;
600
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700601 /* si_domain and vm domain should not get here. */
Lu Baolufa954e62019-05-25 13:41:28 +0800602 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
603 return NULL;
604
Joerg Roedel29a27712015-07-21 17:17:12 +0200605 for_each_domain_iommu(iommu_id, domain)
606 break;
607
Weidong Han8c11e792008-12-08 15:29:22 +0800608 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
609 return NULL;
610
611 return g_iommus[iommu_id];
612}
613
Lu Baolu04c00952020-06-23 07:13:44 +0800614static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
615{
616 return sm_supported(iommu) ?
617 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
618}
619
Weidong Han8e6040972008-12-08 15:49:06 +0800620static void domain_update_iommu_coherency(struct dmar_domain *domain)
621{
David Woodhoused0501962014-03-11 17:10:29 -0700622 struct dmar_drhd_unit *drhd;
623 struct intel_iommu *iommu;
Quentin Lambert2f119c72015-02-06 10:59:53 +0100624 bool found = false;
625 int i;
Weidong Han8e6040972008-12-08 15:49:06 +0800626
David Woodhoused0501962014-03-11 17:10:29 -0700627 domain->iommu_coherency = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800628
Joerg Roedel29a27712015-07-21 17:17:12 +0200629 for_each_domain_iommu(i, domain) {
Quentin Lambert2f119c72015-02-06 10:59:53 +0100630 found = true;
Lu Baolu04c00952020-06-23 07:13:44 +0800631 if (!iommu_paging_structure_coherency(g_iommus[i])) {
Weidong Han8e6040972008-12-08 15:49:06 +0800632 domain->iommu_coherency = 0;
633 break;
634 }
Weidong Han8e6040972008-12-08 15:49:06 +0800635 }
David Woodhoused0501962014-03-11 17:10:29 -0700636 if (found)
637 return;
638
639 /* No hardware attached; use lowest common denominator */
640 rcu_read_lock();
641 for_each_active_iommu(iommu, drhd) {
Lu Baolu04c00952020-06-23 07:13:44 +0800642 if (!iommu_paging_structure_coherency(iommu)) {
David Woodhoused0501962014-03-11 17:10:29 -0700643 domain->iommu_coherency = 0;
644 break;
645 }
646 }
647 rcu_read_unlock();
Weidong Han8e6040972008-12-08 15:49:06 +0800648}
649
Jiang Liu161f6932014-07-11 14:19:37 +0800650static int domain_update_iommu_snooping(struct intel_iommu *skip)
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100651{
Allen Kay8140a952011-10-14 12:32:17 -0700652 struct dmar_drhd_unit *drhd;
Jiang Liu161f6932014-07-11 14:19:37 +0800653 struct intel_iommu *iommu;
654 int ret = 1;
655
656 rcu_read_lock();
657 for_each_active_iommu(iommu, drhd) {
658 if (iommu != skip) {
659 if (!ecap_sc_support(iommu->ecap)) {
660 ret = 0;
661 break;
662 }
663 }
664 }
665 rcu_read_unlock();
666
667 return ret;
668}
669
Lu Baolu64229e82020-01-02 08:18:20 +0800670static int domain_update_iommu_superpage(struct dmar_domain *domain,
671 struct intel_iommu *skip)
Jiang Liu161f6932014-07-11 14:19:37 +0800672{
673 struct dmar_drhd_unit *drhd;
674 struct intel_iommu *iommu;
Lu Baolu64229e82020-01-02 08:18:20 +0800675 int mask = 0x3;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100676
677 if (!intel_iommu_superpage) {
Jiang Liu161f6932014-07-11 14:19:37 +0800678 return 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100679 }
680
Allen Kay8140a952011-10-14 12:32:17 -0700681 /* set iommu_superpage to the smallest common denominator */
Jiang Liu0e242612014-02-19 14:07:34 +0800682 rcu_read_lock();
Allen Kay8140a952011-10-14 12:32:17 -0700683 for_each_active_iommu(iommu, drhd) {
Jiang Liu161f6932014-07-11 14:19:37 +0800684 if (iommu != skip) {
Lu Baolu64229e82020-01-02 08:18:20 +0800685 if (domain && domain_use_first_level(domain)) {
686 if (!cap_fl1gp_support(iommu->cap))
687 mask = 0x1;
688 } else {
689 mask &= cap_super_page_val(iommu->cap);
690 }
691
Jiang Liu161f6932014-07-11 14:19:37 +0800692 if (!mask)
693 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100694 }
695 }
Jiang Liu0e242612014-02-19 14:07:34 +0800696 rcu_read_unlock();
697
Jiang Liu161f6932014-07-11 14:19:37 +0800698 return fls(mask);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100699}
700
Sheng Yang58c610b2009-03-18 15:33:05 +0800701/* Some capabilities may be different across iommus */
702static void domain_update_iommu_cap(struct dmar_domain *domain)
703{
704 domain_update_iommu_coherency(domain);
Jiang Liu161f6932014-07-11 14:19:37 +0800705 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
Lu Baolu64229e82020-01-02 08:18:20 +0800706 domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
Sheng Yang58c610b2009-03-18 15:33:05 +0800707}
708
Sohil Mehta26b86092018-09-11 17:11:36 -0700709struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
710 u8 devfn, int alloc)
David Woodhouse03ecc322015-02-13 14:35:21 +0000711{
712 struct root_entry *root = &iommu->root_entry[bus];
713 struct context_entry *context;
714 u64 *entry;
715
Joerg Roedel4df4eab2015-08-25 10:54:28 +0200716 entry = &root->lo;
Lu Baolu765b6a92018-12-10 09:58:55 +0800717 if (sm_supported(iommu)) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000718 if (devfn >= 0x80) {
719 devfn -= 0x80;
720 entry = &root->hi;
721 }
722 devfn *= 2;
723 }
David Woodhouse03ecc322015-02-13 14:35:21 +0000724 if (*entry & 1)
725 context = phys_to_virt(*entry & VTD_PAGE_MASK);
726 else {
727 unsigned long phy_addr;
728 if (!alloc)
729 return NULL;
730
731 context = alloc_pgtable_page(iommu->node);
732 if (!context)
733 return NULL;
734
735 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
736 phy_addr = virt_to_phys((void *)context);
737 *entry = phy_addr | 1;
738 __iommu_flush_cache(iommu, entry, sizeof(*entry));
739 }
740 return &context[devfn];
741}
742
Joerg Roedel1d46159782020-02-17 17:12:37 +0100743static bool attach_deferred(struct device *dev)
744{
Joerg Roedel01b9d4e2020-06-25 15:08:25 +0200745 return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
Joerg Roedel1d46159782020-02-17 17:12:37 +0100746}
747
Eric Augerb9a7f982019-06-03 08:53:32 +0200748/**
749 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
750 * sub-hierarchy of a candidate PCI-PCI bridge
751 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
752 * @bridge: the candidate PCI-PCI bridge
753 *
754 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
755 */
756static bool
757is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
758{
759 struct pci_dev *pdev, *pbridge;
760
761 if (!dev_is_pci(dev) || !dev_is_pci(bridge))
762 return false;
763
764 pdev = to_pci_dev(dev);
765 pbridge = to_pci_dev(bridge);
766
767 if (pbridge->subordinate &&
768 pbridge->subordinate->number <= pdev->bus->number &&
769 pbridge->subordinate->busn_res.end >= pdev->bus->number)
770 return true;
771
772 return false;
773}
774
Lu Baolu2d33b7d2020-09-03 14:51:32 +0800775static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
776{
777 struct dmar_drhd_unit *drhd;
778 u32 vtbar;
779 int rc;
780
781 /* We know that this device on this chipset has its own IOMMU.
782 * If we find it under a different IOMMU, then the BIOS is lying
783 * to us. Hope that the IOMMU for this device is actually
784 * disabled, and it needs no translation...
785 */
786 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
787 if (rc) {
788 /* "can't" happen */
789 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
790 return false;
791 }
792 vtbar &= 0xffff0000;
793
794 /* we know that the this iommu should be at offset 0xa000 from vtbar */
795 drhd = dmar_find_matched_drhd_unit(pdev);
796 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
797 pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
798 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
799 return true;
800 }
801
802 return false;
803}
804
805static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
806{
807 if (!iommu || iommu->drhd->ignored)
808 return true;
809
810 if (dev_is_pci(dev)) {
811 struct pci_dev *pdev = to_pci_dev(dev);
812
813 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
814 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
815 quirk_ioat_snb_local_iommu(pdev))
816 return true;
817 }
818
819 return false;
820}
821
Lu Baoludd6692f2020-07-24 09:49:21 +0800822struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800823{
824 struct dmar_drhd_unit *drhd = NULL;
Lu Baoludd6692f2020-07-24 09:49:21 +0800825 struct pci_dev *pdev = NULL;
Jiang Liub683b232014-02-19 14:07:32 +0800826 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -0700827 struct device *tmp;
Yijing Wangaa4d0662014-05-26 20:14:06 +0800828 u16 segment = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +0800829 int i;
830
Lu Baolu2d33b7d2020-09-03 14:51:32 +0800831 if (!dev)
David Woodhouse4ed6a542015-05-11 14:59:20 +0100832 return NULL;
833
David Woodhouse156baca2014-03-09 14:00:57 -0700834 if (dev_is_pci(dev)) {
Ashok Raj1c387182016-10-21 15:32:05 -0700835 struct pci_dev *pf_pdev;
836
Jon Derricke3560ee2020-01-21 06:37:49 -0700837 pdev = pci_real_dma_dev(to_pci_dev(dev));
Jon Derrick5823e332017-08-30 15:05:59 -0600838
Ashok Raj1c387182016-10-21 15:32:05 -0700839 /* VFs aren't listed in scope tables; we need to look up
840 * the PF instead to find the IOMMU. */
841 pf_pdev = pci_physfn(pdev);
842 dev = &pf_pdev->dev;
David Woodhouse156baca2014-03-09 14:00:57 -0700843 segment = pci_domain_nr(pdev->bus);
Rafael J. Wysockica5b74d2015-03-16 23:49:08 +0100844 } else if (has_acpi_companion(dev))
David Woodhouse156baca2014-03-09 14:00:57 -0700845 dev = &ACPI_COMPANION(dev)->dev;
846
Jiang Liu0e242612014-02-19 14:07:34 +0800847 rcu_read_lock();
Lu Baolu2d33b7d2020-09-03 14:51:32 +0800848 for_each_iommu(iommu, drhd) {
David Woodhouse156baca2014-03-09 14:00:57 -0700849 if (pdev && segment != drhd->segment)
David Woodhouse276dbf992009-04-04 01:45:37 +0100850 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800851
Jiang Liub683b232014-02-19 14:07:32 +0800852 for_each_active_dev_scope(drhd->devices,
David Woodhouse156baca2014-03-09 14:00:57 -0700853 drhd->devices_cnt, i, tmp) {
854 if (tmp == dev) {
Ashok Raj1c387182016-10-21 15:32:05 -0700855 /* For a VF use its original BDF# not that of the PF
856 * which we used for the IOMMU lookup. Strictly speaking
857 * we could do this for all PCI devices; we only need to
858 * get the BDF# from the scope table for ACPI matches. */
Koos Vriezen5003ae12017-03-01 21:02:50 +0100859 if (pdev && pdev->is_virtfn)
Ashok Raj1c387182016-10-21 15:32:05 -0700860 goto got_pdev;
861
Lu Baoludd6692f2020-07-24 09:49:21 +0800862 if (bus && devfn) {
863 *bus = drhd->devices[i].bus;
864 *devfn = drhd->devices[i].devfn;
865 }
David Woodhouse156baca2014-03-09 14:00:57 -0700866 goto out;
867 }
868
Eric Augerb9a7f982019-06-03 08:53:32 +0200869 if (is_downstream_to_pci_bridge(dev, tmp))
David Woodhouse156baca2014-03-09 14:00:57 -0700870 goto got_pdev;
David Woodhouse924b6232009-04-04 00:39:25 +0100871 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800872
David Woodhouse156baca2014-03-09 14:00:57 -0700873 if (pdev && drhd->include_all) {
874 got_pdev:
Lu Baoludd6692f2020-07-24 09:49:21 +0800875 if (bus && devfn) {
876 *bus = pdev->bus->number;
877 *devfn = pdev->devfn;
878 }
Jiang Liub683b232014-02-19 14:07:32 +0800879 goto out;
David Woodhouse156baca2014-03-09 14:00:57 -0700880 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800881 }
Jiang Liub683b232014-02-19 14:07:32 +0800882 iommu = NULL;
David Woodhouse156baca2014-03-09 14:00:57 -0700883 out:
Lu Baolu2d33b7d2020-09-03 14:51:32 +0800884 if (iommu_is_dummy(iommu, dev))
885 iommu = NULL;
886
Jiang Liu0e242612014-02-19 14:07:34 +0800887 rcu_read_unlock();
Weidong Hanc7151a82008-12-08 22:51:37 +0800888
Jiang Liub683b232014-02-19 14:07:32 +0800889 return iommu;
Weidong Hanc7151a82008-12-08 22:51:37 +0800890}
891
Weidong Han5331fe62008-12-08 23:00:00 +0800892static void domain_flush_cache(struct dmar_domain *domain,
893 void *addr, int size)
894{
895 if (!domain->iommu_coherency)
896 clflush_cache_range(addr, size);
897}
898
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700899static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
900{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700901 struct context_entry *context;
David Woodhouse03ecc322015-02-13 14:35:21 +0000902 int ret = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700903 unsigned long flags;
904
905 spin_lock_irqsave(&iommu->lock, flags);
David Woodhouse03ecc322015-02-13 14:35:21 +0000906 context = iommu_context_addr(iommu, bus, devfn, 0);
907 if (context)
908 ret = context_present(context);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700909 spin_unlock_irqrestore(&iommu->lock, flags);
910 return ret;
911}
912
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700913static void free_context_table(struct intel_iommu *iommu)
914{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700915 int i;
916 unsigned long flags;
917 struct context_entry *context;
918
919 spin_lock_irqsave(&iommu->lock, flags);
920 if (!iommu->root_entry) {
921 goto out;
922 }
923 for (i = 0; i < ROOT_ENTRY_NR; i++) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000924 context = iommu_context_addr(iommu, i, 0, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700925 if (context)
926 free_pgtable_page(context);
David Woodhouse03ecc322015-02-13 14:35:21 +0000927
Lu Baolu765b6a92018-12-10 09:58:55 +0800928 if (!sm_supported(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +0000929 continue;
930
931 context = iommu_context_addr(iommu, i, 0x80, 0);
932 if (context)
933 free_pgtable_page(context);
934
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700935 }
936 free_pgtable_page(iommu->root_entry);
937 iommu->root_entry = NULL;
938out:
939 spin_unlock_irqrestore(&iommu->lock, flags);
940}
941
David Woodhouseb026fd22009-06-28 10:37:25 +0100942static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
David Woodhouse5cf0a762014-03-19 16:07:49 +0000943 unsigned long pfn, int *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700944{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -0600945 struct dma_pte *parent, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700946 int level = agaw_to_level(domain->agaw);
Allen Kay4399c8b2011-10-14 12:32:46 -0700947 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700948
949 BUG_ON(!domain->pgd);
Julian Stecklinaf9423602013-10-09 10:03:52 +0200950
Jiang Liu162d1b12014-07-11 14:19:35 +0800951 if (!domain_pfn_supported(domain, pfn))
Julian Stecklinaf9423602013-10-09 10:03:52 +0200952 /* Address beyond IOMMU's addressing capabilities. */
953 return NULL;
954
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700955 parent = domain->pgd;
956
David Woodhouse5cf0a762014-03-19 16:07:49 +0000957 while (1) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700958 void *tmp_page;
959
David Woodhouseb026fd22009-06-28 10:37:25 +0100960 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700961 pte = &parent[offset];
David Woodhouse5cf0a762014-03-19 16:07:49 +0000962 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100963 break;
David Woodhouse5cf0a762014-03-19 16:07:49 +0000964 if (level == *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700965 break;
966
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000967 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100968 uint64_t pteval;
969
Suresh Siddha4c923d42009-10-02 11:01:24 -0700970 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700971
David Woodhouse206a73c2009-07-01 19:30:28 +0100972 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700973 return NULL;
David Woodhouse206a73c2009-07-01 19:30:28 +0100974
David Woodhousec85994e2009-07-01 19:21:24 +0100975 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400976 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
Lu Baoluddf09b62020-01-02 08:18:17 +0800977 if (domain_use_first_level(domain))
Lu Baolu16ecf102020-06-23 07:13:41 +0800978 pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
Yijing Wangeffad4b2014-05-26 20:13:47 +0800979 if (cmpxchg64(&pte->val, 0ULL, pteval))
David Woodhousec85994e2009-07-01 19:21:24 +0100980 /* Someone else set it while we were thinking; use theirs. */
981 free_pgtable_page(tmp_page);
Yijing Wangeffad4b2014-05-26 20:13:47 +0800982 else
David Woodhousec85994e2009-07-01 19:21:24 +0100983 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700984 }
David Woodhouse5cf0a762014-03-19 16:07:49 +0000985 if (level == 1)
986 break;
987
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000988 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700989 level--;
990 }
991
David Woodhouse5cf0a762014-03-19 16:07:49 +0000992 if (!*target_level)
993 *target_level = level;
994
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700995 return pte;
996}
997
998/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100999static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1000 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001001 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001002{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06001003 struct dma_pte *parent, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001004 int total = agaw_to_level(domain->agaw);
1005 int offset;
1006
1007 parent = domain->pgd;
1008 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +01001009 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001010 pte = &parent[offset];
1011 if (level == total)
1012 return pte;
1013
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001014 if (!dma_pte_present(pte)) {
1015 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001016 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001017 }
1018
Yijing Wange16922a2014-05-20 20:37:51 +08001019 if (dma_pte_superpage(pte)) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001020 *large_page = total;
1021 return pte;
1022 }
1023
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001024 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001025 total--;
1026 }
1027 return NULL;
1028}
1029
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001030/* clear last level pte, a tlb flush should be followed */
David Woodhouse5cf0a762014-03-19 16:07:49 +00001031static void dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf52009-06-27 22:09:11 +01001032 unsigned long start_pfn,
1033 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001034{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06001035 unsigned int large_page;
David Woodhouse310a5ab2009-06-28 18:52:20 +01001036 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001037
Jiang Liu162d1b12014-07-11 14:19:35 +08001038 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1039 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -07001040 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +01001041
David Woodhouse04b18e62009-06-27 19:15:01 +01001042 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -07001043 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001044 large_page = 1;
1045 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +01001046 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001047 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +01001048 continue;
1049 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001050 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +01001051 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001052 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +01001053 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +01001054 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1055
David Woodhouse310a5ab2009-06-28 18:52:20 +01001056 domain_flush_cache(domain, first_pte,
1057 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -07001058
1059 } while (start_pfn && start_pfn <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001060}
1061
Alex Williamson3269ee02013-06-15 10:27:19 -06001062static void dma_pte_free_level(struct dmar_domain *domain, int level,
David Dillowbc24c572017-06-28 19:42:23 -07001063 int retain_level, struct dma_pte *pte,
1064 unsigned long pfn, unsigned long start_pfn,
1065 unsigned long last_pfn)
Alex Williamson3269ee02013-06-15 10:27:19 -06001066{
1067 pfn = max(start_pfn, pfn);
1068 pte = &pte[pfn_level_offset(pfn, level)];
1069
1070 do {
1071 unsigned long level_pfn;
1072 struct dma_pte *level_pte;
1073
1074 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1075 goto next;
1076
David Dillowf7116e12017-01-30 19:11:11 -08001077 level_pfn = pfn & level_mask(level);
Alex Williamson3269ee02013-06-15 10:27:19 -06001078 level_pte = phys_to_virt(dma_pte_addr(pte));
1079
David Dillowbc24c572017-06-28 19:42:23 -07001080 if (level > 2) {
1081 dma_pte_free_level(domain, level - 1, retain_level,
1082 level_pte, level_pfn, start_pfn,
1083 last_pfn);
1084 }
Alex Williamson3269ee02013-06-15 10:27:19 -06001085
David Dillowbc24c572017-06-28 19:42:23 -07001086 /*
1087 * Free the page table if we're below the level we want to
1088 * retain and the range covers the entire table.
1089 */
1090 if (level < retain_level && !(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -08001091 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -06001092 dma_clear_pte(pte);
1093 domain_flush_cache(domain, pte, sizeof(*pte));
1094 free_pgtable_page(level_pte);
1095 }
1096next:
1097 pfn += level_size(level);
1098 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1099}
1100
David Dillowbc24c572017-06-28 19:42:23 -07001101/*
1102 * clear last level (leaf) ptes and free page table pages below the
1103 * level we wish to keep intact.
1104 */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001105static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +01001106 unsigned long start_pfn,
David Dillowbc24c572017-06-28 19:42:23 -07001107 unsigned long last_pfn,
1108 int retain_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001109{
Jiang Liu162d1b12014-07-11 14:19:35 +08001110 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1111 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -07001112 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001113
Jiang Liud41a4ad2014-07-11 14:19:34 +08001114 dma_pte_clear_range(domain, start_pfn, last_pfn);
1115
David Woodhousef3a0a522009-06-30 03:40:07 +01001116 /* We don't need lock here; nobody else touches the iova range */
David Dillowbc24c572017-06-28 19:42:23 -07001117 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
Alex Williamson3269ee02013-06-15 10:27:19 -06001118 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +01001119
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001120 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +01001121 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001122 free_pgtable_page(domain->pgd);
1123 domain->pgd = NULL;
1124 }
1125}
1126
David Woodhouseea8ea462014-03-05 17:09:32 +00001127/* When a page at a given level is being unlinked from its parent, we don't
1128 need to *modify* it at all. All we need to do is make a list of all the
1129 pages which can be freed just as soon as we've flushed the IOTLB and we
1130 know the hardware page-walk will no longer touch them.
1131 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1132 be freed. */
1133static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1134 int level, struct dma_pte *pte,
1135 struct page *freelist)
1136{
1137 struct page *pg;
1138
1139 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1140 pg->freelist = freelist;
1141 freelist = pg;
1142
1143 if (level == 1)
1144 return freelist;
1145
Jiang Liuadeb2592014-04-09 10:20:39 +08001146 pte = page_address(pg);
1147 do {
David Woodhouseea8ea462014-03-05 17:09:32 +00001148 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1149 freelist = dma_pte_list_pagetables(domain, level - 1,
1150 pte, freelist);
Jiang Liuadeb2592014-04-09 10:20:39 +08001151 pte++;
1152 } while (!first_pte_in_page(pte));
David Woodhouseea8ea462014-03-05 17:09:32 +00001153
1154 return freelist;
1155}
1156
1157static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1158 struct dma_pte *pte, unsigned long pfn,
1159 unsigned long start_pfn,
1160 unsigned long last_pfn,
1161 struct page *freelist)
1162{
1163 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1164
1165 pfn = max(start_pfn, pfn);
1166 pte = &pte[pfn_level_offset(pfn, level)];
1167
1168 do {
1169 unsigned long level_pfn;
1170
1171 if (!dma_pte_present(pte))
1172 goto next;
1173
1174 level_pfn = pfn & level_mask(level);
1175
1176 /* If range covers entire pagetable, free it */
1177 if (start_pfn <= level_pfn &&
1178 last_pfn >= level_pfn + level_size(level) - 1) {
1179 /* These suborbinate page tables are going away entirely. Don't
1180 bother to clear them; we're just going to *free* them. */
1181 if (level > 1 && !dma_pte_superpage(pte))
1182 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1183
1184 dma_clear_pte(pte);
1185 if (!first_pte)
1186 first_pte = pte;
1187 last_pte = pte;
1188 } else if (level > 1) {
1189 /* Recurse down into a level that isn't *entirely* obsolete */
1190 freelist = dma_pte_clear_level(domain, level - 1,
1191 phys_to_virt(dma_pte_addr(pte)),
1192 level_pfn, start_pfn, last_pfn,
1193 freelist);
1194 }
1195next:
1196 pfn += level_size(level);
1197 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1198
1199 if (first_pte)
1200 domain_flush_cache(domain, first_pte,
1201 (void *)++last_pte - (void *)first_pte);
1202
1203 return freelist;
1204}
1205
1206/* We can't just free the pages because the IOMMU may still be walking
1207 the page tables, and may have cached the intermediate levels. The
1208 pages can only be freed after the IOTLB flush has been done. */
Joerg Roedelb6904202015-08-13 11:32:18 +02001209static struct page *domain_unmap(struct dmar_domain *domain,
1210 unsigned long start_pfn,
1211 unsigned long last_pfn)
David Woodhouseea8ea462014-03-05 17:09:32 +00001212{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06001213 struct page *freelist;
David Woodhouseea8ea462014-03-05 17:09:32 +00001214
Jiang Liu162d1b12014-07-11 14:19:35 +08001215 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1216 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001217 BUG_ON(start_pfn > last_pfn);
1218
1219 /* we don't need lock here; nobody else touches the iova range */
1220 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1221 domain->pgd, 0, start_pfn, last_pfn, NULL);
1222
1223 /* free pgd */
1224 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1225 struct page *pgd_page = virt_to_page(domain->pgd);
1226 pgd_page->freelist = freelist;
1227 freelist = pgd_page;
1228
1229 domain->pgd = NULL;
1230 }
1231
1232 return freelist;
1233}
1234
Joerg Roedelb6904202015-08-13 11:32:18 +02001235static void dma_free_pagelist(struct page *freelist)
David Woodhouseea8ea462014-03-05 17:09:32 +00001236{
1237 struct page *pg;
1238
1239 while ((pg = freelist)) {
1240 freelist = pg->freelist;
1241 free_pgtable_page(page_address(pg));
1242 }
1243}
1244
Joerg Roedel13cf0172017-08-11 11:40:10 +02001245static void iova_entry_free(unsigned long data)
1246{
1247 struct page *freelist = (struct page *)data;
1248
1249 dma_free_pagelist(freelist);
1250}
1251
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001252/* iommu handling */
1253static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1254{
1255 struct root_entry *root;
1256 unsigned long flags;
1257
Suresh Siddha4c923d42009-10-02 11:01:24 -07001258 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001259 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001260 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001261 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001262 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001263 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001264
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001265 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001266
1267 spin_lock_irqsave(&iommu->lock, flags);
1268 iommu->root_entry = root;
1269 spin_unlock_irqrestore(&iommu->lock, flags);
1270
1271 return 0;
1272}
1273
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001274static void iommu_set_root_entry(struct intel_iommu *iommu)
1275{
David Woodhouse03ecc322015-02-13 14:35:21 +00001276 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001277 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001278 unsigned long flag;
1279
David Woodhouse03ecc322015-02-13 14:35:21 +00001280 addr = virt_to_phys(iommu->root_entry);
Lu Baolu7373a8c2018-12-10 09:59:03 +08001281 if (sm_supported(iommu))
1282 addr |= DMA_RTADDR_SMT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001284 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001285 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001286
David Woodhousec416daa2009-05-10 20:30:58 +01001287 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001288
1289 /* Make sure hardware complete it */
1290 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001291 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001292
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001293 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001294}
1295
Lu Baolu6f7db752018-12-10 09:59:00 +08001296void iommu_flush_write_buffer(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001297{
1298 u32 val;
1299 unsigned long flag;
1300
David Woodhouse9af88142009-02-13 23:18:03 +00001301 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001302 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001303
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001304 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001305 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001306
1307 /* Make sure hardware complete it */
1308 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001309 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001310
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001311 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001312}
1313
1314/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001315static void __iommu_flush_context(struct intel_iommu *iommu,
1316 u16 did, u16 source_id, u8 function_mask,
1317 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001318{
1319 u64 val = 0;
1320 unsigned long flag;
1321
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001322 switch (type) {
1323 case DMA_CCMD_GLOBAL_INVL:
1324 val = DMA_CCMD_GLOBAL_INVL;
1325 break;
1326 case DMA_CCMD_DOMAIN_INVL:
1327 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1328 break;
1329 case DMA_CCMD_DEVICE_INVL:
1330 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1331 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1332 break;
1333 default:
1334 BUG();
1335 }
1336 val |= DMA_CCMD_ICC;
1337
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001338 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001339 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1340
1341 /* Make sure hardware complete it */
1342 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1343 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1344
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001345 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001346}
1347
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001348/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001349static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1350 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001351{
1352 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1353 u64 val = 0, val_iva = 0;
1354 unsigned long flag;
1355
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001356 switch (type) {
1357 case DMA_TLB_GLOBAL_FLUSH:
1358 /* global flush doesn't need set IVA_REG */
1359 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1360 break;
1361 case DMA_TLB_DSI_FLUSH:
1362 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1363 break;
1364 case DMA_TLB_PSI_FLUSH:
1365 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001366 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001367 val_iva = size_order | addr;
1368 break;
1369 default:
1370 BUG();
1371 }
1372 /* Note: set drain read/write */
1373#if 0
1374 /*
1375 * This is probably to be super secure.. Looks like we can
1376 * ignore it without any impact.
1377 */
1378 if (cap_read_drain(iommu->cap))
1379 val |= DMA_TLB_READ_DRAIN;
1380#endif
1381 if (cap_write_drain(iommu->cap))
1382 val |= DMA_TLB_WRITE_DRAIN;
1383
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001384 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001385 /* Note: Only uses first TLB reg currently */
1386 if (val_iva)
1387 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1388 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1389
1390 /* Make sure hardware complete it */
1391 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1392 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1393
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001394 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001395
1396 /* check IOTLB invalidation granularity */
1397 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001398 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001399 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001400 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001401 (unsigned long long)DMA_TLB_IIRG(type),
1402 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001403}
1404
David Woodhouse64ae8922014-03-09 12:52:30 -07001405static struct device_domain_info *
1406iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1407 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001408{
Yu Zhao93a23a72009-05-18 13:51:37 +08001409 struct device_domain_info *info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001410
Joerg Roedel55d94042015-07-22 16:50:40 +02001411 assert_spin_locked(&device_domain_lock);
1412
Yu Zhao93a23a72009-05-18 13:51:37 +08001413 if (!iommu->qi)
1414 return NULL;
1415
Yu Zhao93a23a72009-05-18 13:51:37 +08001416 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001417 if (info->iommu == iommu && info->bus == bus &&
1418 info->devfn == devfn) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001419 if (info->ats_supported && info->dev)
1420 return info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001421 break;
1422 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001423
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001424 return NULL;
Yu Zhao93a23a72009-05-18 13:51:37 +08001425}
1426
Omer Peleg0824c592016-04-20 19:03:35 +03001427static void domain_update_iotlb(struct dmar_domain *domain)
1428{
1429 struct device_domain_info *info;
1430 bool has_iotlb_device = false;
1431
1432 assert_spin_locked(&device_domain_lock);
1433
1434 list_for_each_entry(info, &domain->devices, link) {
1435 struct pci_dev *pdev;
1436
1437 if (!info->dev || !dev_is_pci(info->dev))
1438 continue;
1439
1440 pdev = to_pci_dev(info->dev);
1441 if (pdev->ats_enabled) {
1442 has_iotlb_device = true;
1443 break;
1444 }
1445 }
1446
1447 domain->has_iotlb_device = has_iotlb_device;
1448}
1449
Yu Zhao93a23a72009-05-18 13:51:37 +08001450static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1451{
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001452 struct pci_dev *pdev;
1453
Omer Peleg0824c592016-04-20 19:03:35 +03001454 assert_spin_locked(&device_domain_lock);
1455
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001456 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001457 return;
1458
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001459 pdev = to_pci_dev(info->dev);
Jacob Pan1c48db42018-06-07 09:57:00 -07001460 /* For IOMMU that supports device IOTLB throttling (DIT), we assign
1461 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1462 * queue depth at PF level. If DIT is not set, PFSID will be treated as
1463 * reserved, which should be set to 0.
1464 */
1465 if (!ecap_dit(info->iommu->ecap))
1466 info->pfsid = 0;
1467 else {
1468 struct pci_dev *pf_pdev;
1469
1470 /* pdev will be returned if device is not a vf */
1471 pf_pdev = pci_physfn(pdev);
Heiner Kallweitcc49baa2019-04-24 21:16:10 +02001472 info->pfsid = pci_dev_id(pf_pdev);
Jacob Pan1c48db42018-06-07 09:57:00 -07001473 }
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001474
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001475#ifdef CONFIG_INTEL_IOMMU_SVM
1476 /* The PCIe spec, in its wisdom, declares that the behaviour of
1477 the device if you enable PASID support after ATS support is
1478 undefined. So always enable PASID support on devices which
1479 have it, even if we can't yet know if we're ever going to
1480 use it. */
1481 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1482 info->pasid_enabled = 1;
1483
Kuppuswamy Sathyanarayanan1b84778a2019-02-19 11:04:52 -08001484 if (info->pri_supported &&
1485 (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
1486 !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001487 info->pri_enabled = 1;
1488#endif
Jean-Philippe Bruckerda656a02020-05-20 17:22:03 +02001489 if (info->ats_supported && pci_ats_page_aligned(pdev) &&
Mika Westerbergfb58fdc2018-10-29 13:47:08 +03001490 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001491 info->ats_enabled = 1;
Omer Peleg0824c592016-04-20 19:03:35 +03001492 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001493 info->ats_qdep = pci_ats_queue_depth(pdev);
1494 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001495}
1496
1497static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1498{
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001499 struct pci_dev *pdev;
1500
Omer Peleg0824c592016-04-20 19:03:35 +03001501 assert_spin_locked(&device_domain_lock);
1502
Jeremy McNicollda972fb2016-01-14 21:33:06 -08001503 if (!dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001504 return;
1505
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001506 pdev = to_pci_dev(info->dev);
1507
1508 if (info->ats_enabled) {
1509 pci_disable_ats(pdev);
1510 info->ats_enabled = 0;
Omer Peleg0824c592016-04-20 19:03:35 +03001511 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001512 }
1513#ifdef CONFIG_INTEL_IOMMU_SVM
1514 if (info->pri_enabled) {
1515 pci_disable_pri(pdev);
1516 info->pri_enabled = 0;
1517 }
1518 if (info->pasid_enabled) {
1519 pci_disable_pasid(pdev);
1520 info->pasid_enabled = 0;
1521 }
1522#endif
Yu Zhao93a23a72009-05-18 13:51:37 +08001523}
1524
1525static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1526 u64 addr, unsigned mask)
1527{
1528 u16 sid, qdep;
1529 unsigned long flags;
1530 struct device_domain_info *info;
1531
Omer Peleg0824c592016-04-20 19:03:35 +03001532 if (!domain->has_iotlb_device)
1533 return;
1534
Yu Zhao93a23a72009-05-18 13:51:37 +08001535 spin_lock_irqsave(&device_domain_lock, flags);
1536 list_for_each_entry(info, &domain->devices, link) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001537 if (!info->ats_enabled)
Yu Zhao93a23a72009-05-18 13:51:37 +08001538 continue;
1539
1540 sid = info->bus << 8 | info->devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001541 qdep = info->ats_qdep;
Jacob Pan1c48db42018-06-07 09:57:00 -07001542 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1543 qdep, addr, mask);
Yu Zhao93a23a72009-05-18 13:51:37 +08001544 }
1545 spin_unlock_irqrestore(&device_domain_lock, flags);
1546}
1547
Lu Baolu33cd6e62020-01-02 08:18:18 +08001548static void domain_flush_piotlb(struct intel_iommu *iommu,
1549 struct dmar_domain *domain,
1550 u64 addr, unsigned long npages, bool ih)
1551{
1552 u16 did = domain->iommu_did[iommu->seq_id];
1553
1554 if (domain->default_pasid)
1555 qi_flush_piotlb(iommu, did, domain->default_pasid,
1556 addr, npages, ih);
1557
1558 if (!list_empty(&domain->devices))
1559 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
1560}
1561
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001562static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1563 struct dmar_domain *domain,
1564 unsigned long pfn, unsigned int pages,
1565 int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001566{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001567 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001568 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001569 u16 did = domain->iommu_did[iommu->seq_id];
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001570
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001571 BUG_ON(pages == 0);
1572
David Woodhouseea8ea462014-03-05 17:09:32 +00001573 if (ih)
1574 ih = 1 << 6;
Lu Baolu33cd6e62020-01-02 08:18:18 +08001575
1576 if (domain_use_first_level(domain)) {
1577 domain_flush_piotlb(iommu, domain, addr, pages, ih);
1578 } else {
1579 /*
1580 * Fallback to domain selective flush if no PSI support or
1581 * the size is too big. PSI requires page size to be 2 ^ x,
1582 * and the base address is naturally aligned to the size.
1583 */
1584 if (!cap_pgsel_inv(iommu->cap) ||
1585 mask > cap_max_amask_val(iommu->cap))
1586 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1587 DMA_TLB_DSI_FLUSH);
1588 else
1589 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1590 DMA_TLB_PSI_FLUSH);
1591 }
Yu Zhaobf92df32009-06-29 11:31:45 +08001592
1593 /*
Nadav Amit82653632010-04-01 13:24:40 +03001594 * In caching mode, changes of pages from non-present to present require
1595 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001596 */
Nadav Amit82653632010-04-01 13:24:40 +03001597 if (!cap_caching_mode(iommu->cap) || !map)
Peter Xu9d2e6502018-01-10 13:51:37 +08001598 iommu_flush_dev_iotlb(domain, addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001599}
1600
Peter Xueed91a02018-05-04 10:34:52 +08001601/* Notification for newly created mappings */
1602static inline void __mapping_notify_one(struct intel_iommu *iommu,
1603 struct dmar_domain *domain,
1604 unsigned long pfn, unsigned int pages)
1605{
Lu Baolu33cd6e62020-01-02 08:18:18 +08001606 /*
1607 * It's a non-present to present mapping. Only flush if caching mode
1608 * and second level.
1609 */
1610 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
Peter Xueed91a02018-05-04 10:34:52 +08001611 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1612 else
1613 iommu_flush_write_buffer(iommu);
1614}
1615
Joerg Roedel13cf0172017-08-11 11:40:10 +02001616static void iommu_flush_iova(struct iova_domain *iovad)
1617{
1618 struct dmar_domain *domain;
1619 int idx;
1620
1621 domain = container_of(iovad, struct dmar_domain, iovad);
1622
1623 for_each_domain_iommu(idx, domain) {
1624 struct intel_iommu *iommu = g_iommus[idx];
1625 u16 did = domain->iommu_did[iommu->seq_id];
1626
Lu Baolu33cd6e62020-01-02 08:18:18 +08001627 if (domain_use_first_level(domain))
1628 domain_flush_piotlb(iommu, domain, 0, -1, 0);
1629 else
1630 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1631 DMA_TLB_DSI_FLUSH);
Joerg Roedel13cf0172017-08-11 11:40:10 +02001632
1633 if (!cap_caching_mode(iommu->cap))
1634 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1635 0, MAX_AGAW_PFN_WIDTH);
1636 }
1637}
1638
mark grossf8bab732008-02-08 04:18:38 -08001639static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1640{
1641 u32 pmen;
1642 unsigned long flags;
1643
Lu Baolu5bb71fc72019-03-20 09:58:33 +08001644 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1645 return;
1646
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001647 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001648 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1649 pmen &= ~DMA_PMEN_EPM;
1650 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1651
1652 /* wait for the protected region status bit to clear */
1653 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1654 readl, !(pmen & DMA_PMEN_PRS), pmen);
1655
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001656 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001657}
1658
Jiang Liu2a41cce2014-07-11 14:19:33 +08001659static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001660{
1661 u32 sts;
1662 unsigned long flags;
1663
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001664 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001665 iommu->gcmd |= DMA_GCMD_TE;
1666 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667
1668 /* Make sure hardware complete it */
1669 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001670 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001671
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001672 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001673}
1674
Jiang Liu2a41cce2014-07-11 14:19:33 +08001675static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001676{
1677 u32 sts;
1678 unsigned long flag;
1679
Lu Baolub1012ca2020-07-23 09:34:37 +08001680 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1681 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1682 return;
1683
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001684 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685 iommu->gcmd &= ~DMA_GCMD_TE;
1686 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1687
1688 /* Make sure hardware complete it */
1689 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001690 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001691
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001692 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001693}
1694
1695static int iommu_init_domains(struct intel_iommu *iommu)
1696{
Joerg Roedel8bf47812015-07-21 10:41:21 +02001697 u32 ndomains, nlongs;
1698 size_t size;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699
1700 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001701 pr_debug("%s: Number of Domains supported <%d>\n",
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001702 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001703 nlongs = BITS_TO_LONGS(ndomains);
1704
Donald Dutile94a91b502009-08-20 16:51:34 -04001705 spin_lock_init(&iommu->lock);
1706
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001707 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1708 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001709 pr_err("%s: Allocating domain id array failed\n",
1710 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711 return -ENOMEM;
1712 }
Joerg Roedel8bf47812015-07-21 10:41:21 +02001713
Wei Yang86f004c2016-05-21 02:41:51 +00001714 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001715 iommu->domains = kzalloc(size, GFP_KERNEL);
1716
1717 if (iommu->domains) {
1718 size = 256 * sizeof(struct dmar_domain *);
1719 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1720 }
1721
1722 if (!iommu->domains || !iommu->domains[0]) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001723 pr_err("%s: Allocating domain array failed\n",
1724 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001725 kfree(iommu->domain_ids);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001726 kfree(iommu->domains);
Jiang Liu852bdb02014-01-06 14:18:11 +08001727 iommu->domain_ids = NULL;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001728 iommu->domains = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729 return -ENOMEM;
1730 }
1731
1732 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001733 * If Caching mode is set, then invalid translations are tagged
1734 * with domain-id 0, hence we need to pre-allocate it. We also
1735 * use domain-id 0 as a marker for non-allocated domain-id, so
1736 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001737 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001738 set_bit(0, iommu->domain_ids);
1739
Lu Baolu3b33d4a2018-12-10 09:58:59 +08001740 /*
1741 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
1742 * entry for first-level or pass-through translation modes should
1743 * be programmed with a domain id different from those used for
1744 * second-level or nested translation. We reserve a domain id for
1745 * this purpose.
1746 */
1747 if (sm_supported(iommu))
1748 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1749
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001750 return 0;
1751}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001752
Jiang Liuffebeb42014-11-09 22:48:02 +08001753static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754{
Joerg Roedel29a27712015-07-21 17:17:12 +02001755 struct device_domain_info *info, *tmp;
Joerg Roedel55d94042015-07-22 16:50:40 +02001756 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001757
Joerg Roedel29a27712015-07-21 17:17:12 +02001758 if (!iommu->domains || !iommu->domain_ids)
1759 return;
Jiang Liua4eaa862014-02-19 14:07:30 +08001760
Joerg Roedel55d94042015-07-22 16:50:40 +02001761 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001762 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
Joerg Roedel29a27712015-07-21 17:17:12 +02001763 if (info->iommu != iommu)
1764 continue;
1765
1766 if (!info->dev || !info->domain)
1767 continue;
1768
Joerg Roedelbea64032016-11-08 15:08:26 +01001769 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001770 }
Joerg Roedel55d94042015-07-22 16:50:40 +02001771 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001772
1773 if (iommu->gcmd & DMA_GCMD_TE)
1774 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001775}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001776
Jiang Liuffebeb42014-11-09 22:48:02 +08001777static void free_dmar_iommu(struct intel_iommu *iommu)
1778{
1779 if ((iommu->domains) && (iommu->domain_ids)) {
Wei Yang86f004c2016-05-21 02:41:51 +00001780 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001781 int i;
1782
1783 for (i = 0; i < elems; i++)
1784 kfree(iommu->domains[i]);
Jiang Liuffebeb42014-11-09 22:48:02 +08001785 kfree(iommu->domains);
1786 kfree(iommu->domain_ids);
1787 iommu->domains = NULL;
1788 iommu->domain_ids = NULL;
1789 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001790
Weidong Hand9630fe2008-12-08 11:06:32 +08001791 g_iommus[iommu->seq_id] = NULL;
1792
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001793 /* free context mapping */
1794 free_context_table(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001795
1796#ifdef CONFIG_INTEL_IOMMU_SVM
Lu Baolu765b6a92018-12-10 09:58:55 +08001797 if (pasid_supported(iommu)) {
David Woodhousea222a7f2015-10-07 23:35:18 +01001798 if (ecap_prs(iommu->ecap))
1799 intel_svm_finish_prq(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01001800 }
Jacob Pan33753032020-05-16 14:20:51 +08001801 if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
1802 ioasid_unregister_allocator(&iommu->pasid_allocator);
1803
David Woodhouse8a94ade2015-03-24 14:54:56 +00001804#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001805}
1806
Lu Baolua1948f22020-01-02 08:18:14 +08001807/*
1808 * Check and return whether first level is used by default for
Lu Baolub802d072020-01-02 08:18:21 +08001809 * DMA translation.
Lu Baolua1948f22020-01-02 08:18:14 +08001810 */
1811static bool first_level_by_default(void)
1812{
1813 struct dmar_drhd_unit *drhd;
1814 struct intel_iommu *iommu;
Lu Baolub802d072020-01-02 08:18:21 +08001815 static int first_level_support = -1;
Lu Baolua1948f22020-01-02 08:18:14 +08001816
1817 if (likely(first_level_support != -1))
1818 return first_level_support;
1819
1820 first_level_support = 1;
1821
1822 rcu_read_lock();
1823 for_each_active_iommu(iommu, drhd) {
1824 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
1825 first_level_support = 0;
1826 break;
1827 }
1828 }
1829 rcu_read_unlock();
1830
1831 return first_level_support;
1832}
1833
Jiang Liuab8dfe22014-07-11 14:19:27 +08001834static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001835{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001836 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001837
1838 domain = alloc_domain_mem();
1839 if (!domain)
1840 return NULL;
1841
Jiang Liuab8dfe22014-07-11 14:19:27 +08001842 memset(domain, 0, sizeof(*domain));
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08001843 domain->nid = NUMA_NO_NODE;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001844 domain->flags = flags;
Lu Baolua1948f22020-01-02 08:18:14 +08001845 if (first_level_by_default())
1846 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
Omer Peleg0824c592016-04-20 19:03:35 +03001847 domain->has_iotlb_device = false;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001848 INIT_LIST_HEAD(&domain->devices);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001849
1850 return domain;
1851}
1852
Joerg Roedeld160aca2015-07-22 11:52:53 +02001853/* Must be called with iommu->lock */
1854static int domain_attach_iommu(struct dmar_domain *domain,
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001855 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001856{
Jiang Liu44bde612014-07-11 14:19:29 +08001857 unsigned long ndomains;
Joerg Roedel55d94042015-07-22 16:50:40 +02001858 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001859
Joerg Roedel55d94042015-07-22 16:50:40 +02001860 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001861 assert_spin_locked(&iommu->lock);
Jiang Liu44bde612014-07-11 14:19:29 +08001862
Joerg Roedel29a27712015-07-21 17:17:12 +02001863 domain->iommu_refcnt[iommu->seq_id] += 1;
1864 domain->iommu_count += 1;
1865 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
Jiang Liufb170fb2014-07-11 14:19:28 +08001866 ndomains = cap_ndoms(iommu->cap);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001867 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1868
1869 if (num >= ndomains) {
1870 pr_err("%s: No free domain ids\n", iommu->name);
1871 domain->iommu_refcnt[iommu->seq_id] -= 1;
1872 domain->iommu_count -= 1;
Joerg Roedel55d94042015-07-22 16:50:40 +02001873 return -ENOSPC;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001874 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001875
Joerg Roedeld160aca2015-07-22 11:52:53 +02001876 set_bit(num, iommu->domain_ids);
1877 set_iommu_domain(iommu, num, domain);
Jiang Liufb170fb2014-07-11 14:19:28 +08001878
Joerg Roedeld160aca2015-07-22 11:52:53 +02001879 domain->iommu_did[iommu->seq_id] = num;
1880 domain->nid = iommu->node;
1881
Jiang Liufb170fb2014-07-11 14:19:28 +08001882 domain_update_iommu_cap(domain);
1883 }
Joerg Roedeld160aca2015-07-22 11:52:53 +02001884
Joerg Roedel55d94042015-07-22 16:50:40 +02001885 return 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001886}
1887
1888static int domain_detach_iommu(struct dmar_domain *domain,
1889 struct intel_iommu *iommu)
1890{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06001891 int num, count;
Jiang Liufb170fb2014-07-11 14:19:28 +08001892
Joerg Roedel55d94042015-07-22 16:50:40 +02001893 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001894 assert_spin_locked(&iommu->lock);
Jiang Liufb170fb2014-07-11 14:19:28 +08001895
Joerg Roedel29a27712015-07-21 17:17:12 +02001896 domain->iommu_refcnt[iommu->seq_id] -= 1;
1897 count = --domain->iommu_count;
1898 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02001899 num = domain->iommu_did[iommu->seq_id];
1900 clear_bit(num, iommu->domain_ids);
1901 set_iommu_domain(iommu, num, NULL);
1902
Jiang Liufb170fb2014-07-11 14:19:28 +08001903 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001904 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001905 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001906
1907 return count;
1908}
1909
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001910static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001911static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001912
Joseph Cihula51a63e62011-03-21 11:04:24 -07001913static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001914{
1915 struct pci_dev *pdev = NULL;
1916 struct iova *iova;
1917 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001918
Zhen Leiaa3ac942017-09-21 16:52:45 +01001919 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001920
Mark Gross8a443df2008-03-04 14:59:31 -08001921 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1922 &reserved_rbtree_key);
1923
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001924 /* IOAPIC ranges shouldn't be accessed by DMA */
1925 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1926 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001927 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001928 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001929 return -ENODEV;
1930 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001931
1932 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1933 for_each_pci_dev(pdev) {
1934 struct resource *r;
1935
1936 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1937 r = &pdev->resource[i];
1938 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1939 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001940 iova = reserve_iova(&reserved_iova_list,
1941 IOVA_PFN(r->start),
1942 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001943 if (!iova) {
Bjorn Helgaas932a6522019-02-08 16:06:00 -06001944 pci_err(pdev, "Reserve iova for %pR failed\n", r);
Joseph Cihula51a63e62011-03-21 11:04:24 -07001945 return -ENODEV;
1946 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001947 }
1948 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001949 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001950}
1951
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001952static inline int guestwidth_to_adjustwidth(int gaw)
1953{
1954 int agaw;
1955 int r = (gaw - 12) % 9;
1956
1957 if (r == 0)
1958 agaw = gaw;
1959 else
1960 agaw = gaw + 9 - r;
1961 if (agaw > 64)
1962 agaw = 64;
1963 return agaw;
1964}
1965
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001966static void domain_exit(struct dmar_domain *domain)
1967{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968
Joerg Roedeld160aca2015-07-22 11:52:53 +02001969 /* Remove associated devices and clear attached or cached domains */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001970 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001971
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001972 /* destroy iovas */
Tom Murphye70b0812020-05-16 14:21:01 +08001973 if (domain->domain.type == IOMMU_DOMAIN_DMA)
1974 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001975
Dmitry Safonov3ee9eca2019-07-16 22:38:06 +01001976 if (domain->pgd) {
1977 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001978
Dmitry Safonov3ee9eca2019-07-16 22:38:06 +01001979 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1980 dma_free_pagelist(freelist);
1981 }
David Woodhouseea8ea462014-03-05 17:09:32 +00001982
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001983 free_domain_mem(domain);
1984}
1985
Lu Baolu7373a8c2018-12-10 09:59:03 +08001986/*
1987 * Get the PASID directory size for scalable mode context entry.
1988 * Value of X in the PDTS field of a scalable mode context entry
1989 * indicates PASID directory with 2^(X + 7) entries.
1990 */
1991static inline unsigned long context_get_sm_pds(struct pasid_table *table)
1992{
1993 int pds, max_pde;
1994
1995 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
1996 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
1997 if (pds < 7)
1998 return 0;
1999
2000 return pds - 7;
2001}
2002
2003/*
2004 * Set the RID_PASID field of a scalable mode context entry. The
2005 * IOMMU hardware will use the PASID value set in this field for
2006 * DMA translations of DMA requests without PASID.
2007 */
2008static inline void
2009context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
2010{
2011 context->hi |= pasid & ((1 << 20) - 1);
Lu Baolu7373a8c2018-12-10 09:59:03 +08002012}
2013
2014/*
2015 * Set the DTE(Device-TLB Enable) field of a scalable mode context
2016 * entry.
2017 */
2018static inline void context_set_sm_dte(struct context_entry *context)
2019{
2020 context->lo |= (1 << 2);
2021}
2022
2023/*
2024 * Set the PRE(Page Request Enable) field of a scalable mode context
2025 * entry.
2026 */
2027static inline void context_set_sm_pre(struct context_entry *context)
2028{
2029 context->lo |= (1 << 4);
2030}
2031
2032/* Convert value to context PASID directory size field coding. */
2033#define context_pdts(pds) (((pds) & 0x7) << 9)
2034
David Woodhouse64ae8922014-03-09 12:52:30 -07002035static int domain_context_mapping_one(struct dmar_domain *domain,
2036 struct intel_iommu *iommu,
Lu Baoluca6e3222018-12-10 09:59:02 +08002037 struct pasid_table *table,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002038 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002039{
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002040 u16 did = domain->iommu_did[iommu->seq_id];
Joerg Roedel28ccce02015-07-21 14:45:31 +02002041 int translation = CONTEXT_TT_MULTI_LEVEL;
2042 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002043 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002044 unsigned long flags;
Lu Baolu7373a8c2018-12-10 09:59:03 +08002045 int ret;
Joerg Roedel28ccce02015-07-21 14:45:31 +02002046
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002047 WARN_ON(did == 0);
2048
Joerg Roedel28ccce02015-07-21 14:45:31 +02002049 if (hw_pass_through && domain_type_is_si(domain))
2050 translation = CONTEXT_TT_PASS_THROUGH;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002051
2052 pr_debug("Set context mapping for %02x:%02x.%d\n",
2053 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002054
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002055 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08002056
Joerg Roedel55d94042015-07-22 16:50:40 +02002057 spin_lock_irqsave(&device_domain_lock, flags);
2058 spin_lock(&iommu->lock);
2059
2060 ret = -ENOMEM;
David Woodhouse03ecc322015-02-13 14:35:21 +00002061 context = iommu_context_addr(iommu, bus, devfn, 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002062 if (!context)
Joerg Roedel55d94042015-07-22 16:50:40 +02002063 goto out_unlock;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002064
Joerg Roedel55d94042015-07-22 16:50:40 +02002065 ret = 0;
2066 if (context_present(context))
2067 goto out_unlock;
Joerg Roedelcf484d02015-06-12 12:21:46 +02002068
Xunlei Pangaec0e862016-12-05 20:09:07 +08002069 /*
2070 * For kdump cases, old valid entries may be cached due to the
2071 * in-flight DMA and copied pgtable, but there is no unmapping
2072 * behaviour for them, thus we need an explicit cache flush for
2073 * the newly-mapped device. For kdump, at this point, the device
2074 * is supposed to finish reset at its driver probe stage, so no
2075 * in-flight DMA will exist, and we don't need to worry anymore
2076 * hereafter.
2077 */
2078 if (context_copied(context)) {
2079 u16 did_old = context_domain_id(context);
2080
Christos Gkekasb117e032017-10-08 23:33:31 +01002081 if (did_old < cap_ndoms(iommu->cap)) {
Xunlei Pangaec0e862016-12-05 20:09:07 +08002082 iommu->flush.flush_context(iommu, did_old,
2083 (((u16)bus) << 8) | devfn,
2084 DMA_CCMD_MASK_NOBIT,
2085 DMA_CCMD_DEVICE_INVL);
KarimAllah Ahmedf73a7ee2017-05-05 11:39:59 -07002086 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2087 DMA_TLB_DSI_FLUSH);
2088 }
Xunlei Pangaec0e862016-12-05 20:09:07 +08002089 }
2090
Joerg Roedelde24e552015-07-21 14:53:04 +02002091 context_clear_entry(context);
Weidong Hanea6606b2008-12-08 23:08:15 +08002092
Lu Baolu7373a8c2018-12-10 09:59:03 +08002093 if (sm_supported(iommu)) {
2094 unsigned long pds;
Joerg Roedelde24e552015-07-21 14:53:04 +02002095
Lu Baolu7373a8c2018-12-10 09:59:03 +08002096 WARN_ON(!table);
2097
2098 /* Setup the PASID DIR pointer: */
2099 pds = context_get_sm_pds(table);
2100 context->lo = (u64)virt_to_phys(table->table) |
2101 context_pdts(pds);
2102
2103 /* Setup the RID_PASID field: */
2104 context_set_sm_rid2pasid(context, PASID_RID2PASID);
2105
2106 /*
2107 * Setup the Device-TLB enable bit and Page request
2108 * Enable bit:
2109 */
David Woodhouse64ae8922014-03-09 12:52:30 -07002110 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002111 if (info && info->ats_supported)
Lu Baolu7373a8c2018-12-10 09:59:03 +08002112 context_set_sm_dte(context);
2113 if (info && info->pri_supported)
2114 context_set_sm_pre(context);
Joerg Roedelde24e552015-07-21 14:53:04 +02002115 } else {
Lu Baolu7373a8c2018-12-10 09:59:03 +08002116 struct dma_pte *pgd = domain->pgd;
2117 int agaw;
2118
2119 context_set_domain_id(context, did);
Lu Baolu7373a8c2018-12-10 09:59:03 +08002120
2121 if (translation != CONTEXT_TT_PASS_THROUGH) {
2122 /*
2123 * Skip top levels of page tables for iommu which has
2124 * less agaw than default. Unnecessary for PT mode.
2125 */
2126 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2127 ret = -ENOMEM;
2128 pgd = phys_to_virt(dma_pte_addr(pgd));
2129 if (!dma_pte_present(pgd))
2130 goto out_unlock;
2131 }
2132
2133 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2134 if (info && info->ats_supported)
2135 translation = CONTEXT_TT_DEV_IOTLB;
2136 else
2137 translation = CONTEXT_TT_MULTI_LEVEL;
2138
2139 context_set_address_root(context, virt_to_phys(pgd));
2140 context_set_address_width(context, agaw);
2141 } else {
2142 /*
2143 * In pass through mode, AW must be programmed to
2144 * indicate the largest AGAW value supported by
2145 * hardware. And ASR is ignored by hardware.
2146 */
2147 context_set_address_width(context, iommu->msagaw);
2148 }
Lu Baolu41b80db2019-03-01 11:23:11 +08002149
2150 context_set_translation_type(context, translation);
Yu Zhao93a23a72009-05-18 13:51:37 +08002151 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002152
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00002153 context_set_fault_enable(context);
2154 context_set_present(context);
Lu Baolu04c00952020-06-23 07:13:44 +08002155 if (!ecap_coherent(iommu->ecap))
2156 clflush_cache_range(context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002157
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002158 /*
2159 * It's a non-present to present mapping. If hardware doesn't cache
2160 * non-present entry we only need to flush the write-buffer. If the
2161 * _does_ cache non-present entries, then it does so in the special
2162 * domain #0, which we have to flush:
2163 */
2164 if (cap_caching_mode(iommu->cap)) {
2165 iommu->flush.flush_context(iommu, 0,
2166 (((u16)bus) << 8) | devfn,
2167 DMA_CCMD_MASK_NOBIT,
2168 DMA_CCMD_DEVICE_INVL);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002169 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002170 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002171 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002172 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002173 iommu_enable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08002174
Joerg Roedel55d94042015-07-22 16:50:40 +02002175 ret = 0;
2176
2177out_unlock:
2178 spin_unlock(&iommu->lock);
2179 spin_unlock_irqrestore(&device_domain_lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08002180
Wei Yang5c365d12016-07-13 13:53:21 +00002181 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182}
2183
Lu Baolu0ce4a852019-08-26 16:50:56 +08002184struct domain_context_mapping_data {
2185 struct dmar_domain *domain;
2186 struct intel_iommu *iommu;
2187 struct pasid_table *table;
2188};
2189
2190static int domain_context_mapping_cb(struct pci_dev *pdev,
2191 u16 alias, void *opaque)
2192{
2193 struct domain_context_mapping_data *data = opaque;
2194
2195 return domain_context_mapping_one(data->domain, data->iommu,
2196 data->table, PCI_BUS_NUM(alias),
2197 alias & 0xff);
2198}
2199
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002200static int
Joerg Roedel28ccce02015-07-21 14:45:31 +02002201domain_context_mapping(struct dmar_domain *domain, struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002202{
Lu Baolu0ce4a852019-08-26 16:50:56 +08002203 struct domain_context_mapping_data data;
Lu Baoluca6e3222018-12-10 09:59:02 +08002204 struct pasid_table *table;
David Woodhouse64ae8922014-03-09 12:52:30 -07002205 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002206 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002207
David Woodhousee1f167f2014-03-09 15:24:46 -07002208 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002209 if (!iommu)
2210 return -ENODEV;
2211
Lu Baoluca6e3222018-12-10 09:59:02 +08002212 table = intel_pasid_get_table(dev);
Lu Baolu0ce4a852019-08-26 16:50:56 +08002213
2214 if (!dev_is_pci(dev))
2215 return domain_context_mapping_one(domain, iommu, table,
2216 bus, devfn);
2217
2218 data.domain = domain;
2219 data.iommu = iommu;
2220 data.table = table;
2221
2222 return pci_for_each_dma_alias(to_pci_dev(dev),
2223 &domain_context_mapping_cb, &data);
Alex Williamson579305f2014-07-03 09:51:43 -06002224}
2225
2226static int domain_context_mapped_cb(struct pci_dev *pdev,
2227 u16 alias, void *opaque)
2228{
2229 struct intel_iommu *iommu = opaque;
2230
2231 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002232}
2233
David Woodhousee1f167f2014-03-09 15:24:46 -07002234static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002235{
Weidong Han5331fe62008-12-08 23:00:00 +08002236 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002237 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002238
David Woodhousee1f167f2014-03-09 15:24:46 -07002239 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002240 if (!iommu)
2241 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002242
Alex Williamson579305f2014-07-03 09:51:43 -06002243 if (!dev_is_pci(dev))
2244 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002245
Alex Williamson579305f2014-07-03 09:51:43 -06002246 return !pci_for_each_dma_alias(to_pci_dev(dev),
2247 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002248}
2249
Fenghua Yuf5329592009-08-04 15:09:37 -07002250/* Returns a number of VTD pages, but aligned to MM page size */
2251static inline unsigned long aligned_nrpages(unsigned long host_addr,
2252 size_t size)
2253{
2254 host_addr &= ~PAGE_MASK;
2255 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2256}
2257
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002258/* Return largest possible superpage level for a given mapping */
2259static inline int hardware_largepage_caps(struct dmar_domain *domain,
2260 unsigned long iov_pfn,
2261 unsigned long phy_pfn,
2262 unsigned long pages)
2263{
2264 int support, level = 1;
2265 unsigned long pfnmerge;
2266
2267 support = domain->iommu_superpage;
2268
2269 /* To use a large page, the virtual *and* physical addresses
2270 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2271 of them will mean we have to use smaller pages. So just
2272 merge them and check both at once. */
2273 pfnmerge = iov_pfn | phy_pfn;
2274
2275 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2276 pages >>= VTD_STRIDE_SHIFT;
2277 if (!pages)
2278 break;
2279 pfnmerge >>= VTD_STRIDE_SHIFT;
2280 level++;
2281 support--;
2282 }
2283 return level;
2284}
2285
David Woodhouse9051aa02009-06-29 12:30:54 +01002286static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2287 struct scatterlist *sg, unsigned long phys_pfn,
2288 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002289{
2290 struct dma_pte *first_pte = NULL, *pte = NULL;
Kees Cook3f649ab2020-06-03 13:09:38 -07002291 phys_addr_t pteval;
Jiang Liucc4f14a2014-11-26 09:42:10 +08002292 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002293 unsigned int largepage_lvl = 0;
2294 unsigned long lvl_pages = 0;
Lu Baoluddf09b62020-01-02 08:18:17 +08002295 u64 attr;
David Woodhousee1605492009-06-29 11:17:38 +01002296
Jiang Liu162d1b12014-07-11 14:19:35 +08002297 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002298
2299 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2300 return -EINVAL;
2301
Lu Baoluddf09b62020-01-02 08:18:17 +08002302 attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
2303 if (domain_use_first_level(domain))
Lu Baolu16ecf102020-06-23 07:13:41 +08002304 attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
David Woodhousee1605492009-06-29 11:17:38 +01002305
Jiang Liucc4f14a2014-11-26 09:42:10 +08002306 if (!sg) {
2307 sg_res = nr_pages;
Lu Baoluddf09b62020-01-02 08:18:17 +08002308 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
David Woodhouse9051aa02009-06-29 12:30:54 +01002309 }
2310
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002311 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002312 uint64_t tmp;
2313
David Woodhousee1605492009-06-29 11:17:38 +01002314 if (!sg_res) {
Robin Murphy29a90b72017-09-28 15:14:01 +01002315 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2316
Fenghua Yuf5329592009-08-04 15:09:37 -07002317 sg_res = aligned_nrpages(sg->offset, sg->length);
Robin Murphy29a90b72017-09-28 15:14:01 +01002318 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
David Woodhousee1605492009-06-29 11:17:38 +01002319 sg->dma_length = sg->length;
Lu Baoluddf09b62020-01-02 08:18:17 +08002320 pteval = (sg_phys(sg) - pgoff) | attr;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002321 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002322 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002323
David Woodhousee1605492009-06-29 11:17:38 +01002324 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002325 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2326
David Woodhouse5cf0a762014-03-19 16:07:49 +00002327 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002328 if (!pte)
2329 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002330 /* It is large page*/
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002331 if (largepage_lvl > 1) {
Christian Zanderba2374f2015-06-10 09:41:45 -07002332 unsigned long nr_superpages, end_pfn;
2333
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002334 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002335 lvl_pages = lvl_to_nr_pages(largepage_lvl);
Christian Zanderba2374f2015-06-10 09:41:45 -07002336
2337 nr_superpages = sg_res / lvl_pages;
2338 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2339
Jiang Liud41a4ad2014-07-11 14:19:34 +08002340 /*
2341 * Ensure that old small page tables are
Christian Zanderba2374f2015-06-10 09:41:45 -07002342 * removed to make room for superpage(s).
David Dillowbc24c572017-06-28 19:42:23 -07002343 * We're adding new large pages, so make sure
2344 * we don't remove their parent tables.
Jiang Liud41a4ad2014-07-11 14:19:34 +08002345 */
David Dillowbc24c572017-06-28 19:42:23 -07002346 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2347 largepage_lvl + 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002348 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002349 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002350 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002351
David Woodhousee1605492009-06-29 11:17:38 +01002352 }
2353 /* We don't need lock here, nobody else
2354 * touches the iova range
2355 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002356 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002357 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002358 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002359 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2360 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002361 if (dumps) {
2362 dumps--;
2363 debug_dma_dump_mappings(NULL);
2364 }
2365 WARN_ON(1);
2366 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002367
2368 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2369
2370 BUG_ON(nr_pages < lvl_pages);
2371 BUG_ON(sg_res < lvl_pages);
2372
2373 nr_pages -= lvl_pages;
2374 iov_pfn += lvl_pages;
2375 phys_pfn += lvl_pages;
2376 pteval += lvl_pages * VTD_PAGE_SIZE;
2377 sg_res -= lvl_pages;
2378
2379 /* If the next PTE would be the first in a new page, then we
2380 need to flush the cache on the entries we've just written.
2381 And then we'll need to recalculate 'pte', so clear it and
2382 let it get set again in the if (!pte) block above.
2383
2384 If we're done (!nr_pages) we need to flush the cache too.
2385
2386 Also if we've been setting superpages, we may need to
2387 recalculate 'pte' and switch back to smaller pages for the
2388 end of the mapping, if the trailing size is not enough to
2389 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002390 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002391 if (!nr_pages || first_pte_in_page(pte) ||
2392 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002393 domain_flush_cache(domain, first_pte,
2394 (void *)pte - (void *)first_pte);
2395 pte = NULL;
2396 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002397
2398 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002399 sg = sg_next(sg);
2400 }
2401 return 0;
2402}
2403
Peter Xu87684fd2018-05-04 10:34:53 +08002404static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
Lu Baolu095303e2019-04-29 09:16:02 +08002405 struct scatterlist *sg, unsigned long phys_pfn,
2406 unsigned long nr_pages, int prot)
Peter Xu87684fd2018-05-04 10:34:53 +08002407{
Lu Baolufa954e62019-05-25 13:41:28 +08002408 int iommu_id, ret;
Lu Baolu095303e2019-04-29 09:16:02 +08002409 struct intel_iommu *iommu;
Peter Xu87684fd2018-05-04 10:34:53 +08002410
Lu Baolu095303e2019-04-29 09:16:02 +08002411 /* Do the real mapping first */
2412 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2413 if (ret)
2414 return ret;
Peter Xu87684fd2018-05-04 10:34:53 +08002415
Lu Baolufa954e62019-05-25 13:41:28 +08002416 for_each_domain_iommu(iommu_id, domain) {
2417 iommu = g_iommus[iommu_id];
Lu Baolu095303e2019-04-29 09:16:02 +08002418 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2419 }
2420
2421 return 0;
Peter Xu87684fd2018-05-04 10:34:53 +08002422}
2423
David Woodhouse9051aa02009-06-29 12:30:54 +01002424static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2425 struct scatterlist *sg, unsigned long nr_pages,
2426 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002427{
Peter Xu87684fd2018-05-04 10:34:53 +08002428 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
David Woodhouse9051aa02009-06-29 12:30:54 +01002429}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002430
David Woodhouse9051aa02009-06-29 12:30:54 +01002431static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2432 unsigned long phys_pfn, unsigned long nr_pages,
2433 int prot)
2434{
Peter Xu87684fd2018-05-04 10:34:53 +08002435 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002436}
2437
Joerg Roedel2452d9d2015-07-23 16:20:14 +02002438static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002439{
Filippo Sironi50822192017-08-31 10:58:11 +02002440 unsigned long flags;
2441 struct context_entry *context;
2442 u16 did_old;
2443
Weidong Hanc7151a82008-12-08 22:51:37 +08002444 if (!iommu)
2445 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002446
Filippo Sironi50822192017-08-31 10:58:11 +02002447 spin_lock_irqsave(&iommu->lock, flags);
2448 context = iommu_context_addr(iommu, bus, devfn, 0);
2449 if (!context) {
2450 spin_unlock_irqrestore(&iommu->lock, flags);
2451 return;
2452 }
2453 did_old = context_domain_id(context);
2454 context_clear_entry(context);
2455 __iommu_flush_cache(iommu, context, sizeof(*context));
2456 spin_unlock_irqrestore(&iommu->lock, flags);
2457 iommu->flush.flush_context(iommu,
2458 did_old,
2459 (((u16)bus) << 8) | devfn,
2460 DMA_CCMD_MASK_NOBIT,
2461 DMA_CCMD_DEVICE_INVL);
2462 iommu->flush.flush_iotlb(iommu,
2463 did_old,
2464 0,
2465 0,
2466 DMA_TLB_DSI_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002467}
2468
David Woodhouse109b9b02012-05-25 17:43:02 +01002469static inline void unlink_domain_info(struct device_domain_info *info)
2470{
2471 assert_spin_locked(&device_domain_lock);
2472 list_del(&info->link);
2473 list_del(&info->global);
2474 if (info->dev)
Joerg Roedel01b9d4e2020-06-25 15:08:25 +02002475 dev_iommu_priv_set(info->dev, NULL);
David Woodhouse109b9b02012-05-25 17:43:02 +01002476}
2477
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002478static void domain_remove_dev_info(struct dmar_domain *domain)
2479{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002480 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002481 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002482
2483 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel76f45fe2015-07-21 18:25:11 +02002484 list_for_each_entry_safe(info, tmp, &domain->devices, link)
Joerg Roedel127c7612015-07-23 17:44:46 +02002485 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002486 spin_unlock_irqrestore(&device_domain_lock, flags);
2487}
2488
Lu Baolue2726da2020-01-02 08:18:22 +08002489struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002490{
2491 struct device_domain_info *info;
2492
Lu Baolu2d33b7d2020-09-03 14:51:32 +08002493 if (unlikely(attach_deferred(dev)))
Lu Baolu1ee0186b2019-09-21 15:06:44 +08002494 return NULL;
2495
2496 /* No lock here, assumes no domain exit in normal case */
Lu Baolue85bb992020-05-16 14:20:52 +08002497 info = get_domain_info(dev);
Lu Baolu1ee0186b2019-09-21 15:06:44 +08002498 if (likely(info))
2499 return info->domain;
2500
2501 return NULL;
2502}
2503
Joerg Roedel034d98c2020-02-17 17:16:19 +01002504static void do_deferred_attach(struct device *dev)
2505{
2506 struct iommu_domain *domain;
2507
Joerg Roedel01b9d4e2020-06-25 15:08:25 +02002508 dev_iommu_priv_set(dev, NULL);
Joerg Roedel034d98c2020-02-17 17:16:19 +01002509 domain = iommu_get_domain_for_dev(dev);
2510 if (domain)
2511 intel_iommu_attach_device(domain, dev);
2512}
2513
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002514static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002515dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2516{
2517 struct device_domain_info *info;
2518
2519 list_for_each_entry(info, &device_domain_list, global)
Jon Derrick4fda2302020-05-27 10:56:16 -06002520 if (info->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002521 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002522 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002523
2524 return NULL;
2525}
2526
Lu Baoluddf09b62020-01-02 08:18:17 +08002527static int domain_setup_first_level(struct intel_iommu *iommu,
2528 struct dmar_domain *domain,
2529 struct device *dev,
Fenghua Yuc7b6bac2020-09-15 09:30:05 -07002530 u32 pasid)
Lu Baoluddf09b62020-01-02 08:18:17 +08002531{
2532 int flags = PASID_FLAG_SUPERVISOR_MODE;
2533 struct dma_pte *pgd = domain->pgd;
2534 int agaw, level;
2535
2536 /*
2537 * Skip top levels of page tables for iommu which has
2538 * less agaw than default. Unnecessary for PT mode.
2539 */
2540 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2541 pgd = phys_to_virt(dma_pte_addr(pgd));
2542 if (!dma_pte_present(pgd))
2543 return -ENOMEM;
2544 }
2545
2546 level = agaw_to_level(agaw);
2547 if (level != 4 && level != 5)
2548 return -EINVAL;
2549
2550 flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
2551
2552 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2553 domain->iommu_did[iommu->seq_id],
2554 flags);
2555}
2556
Jon Derrick8038bdb2020-05-27 10:56:15 -06002557static bool dev_is_real_dma_subdevice(struct device *dev)
2558{
2559 return dev && dev_is_pci(dev) &&
2560 pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
2561}
2562
Joerg Roedel5db31562015-07-22 12:40:43 +02002563static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2564 int bus, int devfn,
2565 struct device *dev,
2566 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002567{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002568 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002569 struct device_domain_info *info;
2570 unsigned long flags;
Joerg Roedeld160aca2015-07-22 11:52:53 +02002571 int ret;
Jiang Liu745f2582014-02-19 14:07:26 +08002572
2573 info = alloc_devinfo_mem();
2574 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002575 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002576
Jon Derrick4fda2302020-05-27 10:56:16 -06002577 if (!dev_is_real_dma_subdevice(dev)) {
2578 info->bus = bus;
2579 info->devfn = devfn;
2580 info->segment = iommu->segment;
2581 } else {
2582 struct pci_dev *pdev = to_pci_dev(dev);
2583
2584 info->bus = pdev->bus->number;
2585 info->devfn = pdev->devfn;
2586 info->segment = pci_domain_nr(pdev->bus);
2587 }
2588
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002589 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2590 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2591 info->ats_qdep = 0;
Jiang Liu745f2582014-02-19 14:07:26 +08002592 info->dev = dev;
2593 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002594 info->iommu = iommu;
Lu Baolucc580e42018-07-14 15:46:59 +08002595 info->pasid_table = NULL;
Lu Baolu95587a72019-03-25 09:30:30 +08002596 info->auxd_enabled = 0;
Lu Baolu67b8e022019-03-25 09:30:32 +08002597 INIT_LIST_HEAD(&info->auxiliary_domains);
Jiang Liu745f2582014-02-19 14:07:26 +08002598
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002599 if (dev && dev_is_pci(dev)) {
2600 struct pci_dev *pdev = to_pci_dev(info->dev);
2601
Jean-Philippe Bruckerda656a02020-05-20 17:22:03 +02002602 if (ecap_dev_iotlb_support(iommu->ecap) &&
2603 pci_ats_supported(pdev) &&
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002604 dmar_find_matched_atsr_unit(pdev))
2605 info->ats_supported = 1;
2606
Lu Baolu765b6a92018-12-10 09:58:55 +08002607 if (sm_supported(iommu)) {
2608 if (pasid_supported(iommu)) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002609 int features = pci_pasid_features(pdev);
2610 if (features >= 0)
2611 info->pasid_supported = features | 1;
2612 }
2613
2614 if (info->ats_supported && ecap_prs(iommu->ecap) &&
Ashok Raj3f9a7a12020-07-23 15:37:29 -07002615 pci_pri_supported(pdev))
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002616 info->pri_supported = 1;
2617 }
2618 }
2619
Jiang Liu745f2582014-02-19 14:07:26 +08002620 spin_lock_irqsave(&device_domain_lock, flags);
2621 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002622 found = find_domain(dev);
Joerg Roedelf303e502015-07-23 18:37:13 +02002623
2624 if (!found) {
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002625 struct device_domain_info *info2;
Jon Derrick4fda2302020-05-27 10:56:16 -06002626 info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
2627 info->devfn);
Joerg Roedelf303e502015-07-23 18:37:13 +02002628 if (info2) {
2629 found = info2->domain;
2630 info2->dev = dev;
2631 }
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002632 }
Joerg Roedelf303e502015-07-23 18:37:13 +02002633
Jiang Liu745f2582014-02-19 14:07:26 +08002634 if (found) {
2635 spin_unlock_irqrestore(&device_domain_lock, flags);
2636 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002637 /* Caller must free the original domain */
2638 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002639 }
2640
Joerg Roedeld160aca2015-07-22 11:52:53 +02002641 spin_lock(&iommu->lock);
2642 ret = domain_attach_iommu(domain, iommu);
2643 spin_unlock(&iommu->lock);
2644
2645 if (ret) {
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002646 spin_unlock_irqrestore(&device_domain_lock, flags);
Sudip Mukherjee499f3aa2015-09-18 16:27:07 +05302647 free_devinfo_mem(info);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002648 return NULL;
2649 }
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002650
David Woodhouseb718cd32014-03-09 13:11:33 -07002651 list_add(&info->link, &domain->devices);
2652 list_add(&info->global, &device_domain_list);
2653 if (dev)
Joerg Roedel01b9d4e2020-06-25 15:08:25 +02002654 dev_iommu_priv_set(dev, info);
Lu Baolu0bbeb012018-12-10 09:58:56 +08002655 spin_unlock_irqrestore(&device_domain_lock, flags);
Lu Baolua7fc93f2018-07-14 15:47:00 +08002656
Lu Baolu0bbeb012018-12-10 09:58:56 +08002657 /* PASID table is mandatory for a PCI device in scalable mode. */
2658 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
Lu Baolua7fc93f2018-07-14 15:47:00 +08002659 ret = intel_pasid_alloc_table(dev);
2660 if (ret) {
Bjorn Helgaas932a6522019-02-08 16:06:00 -06002661 dev_err(dev, "PASID table allocation failed\n");
Bjorn Helgaas71753232019-02-08 16:06:15 -06002662 dmar_remove_one_dev_info(dev);
Lu Baolu0bbeb012018-12-10 09:58:56 +08002663 return NULL;
Lu Baolua7fc93f2018-07-14 15:47:00 +08002664 }
Lu Baoluef848b72018-12-10 09:59:01 +08002665
2666 /* Setup the PASID entry for requests without PASID: */
2667 spin_lock(&iommu->lock);
2668 if (hw_pass_through && domain_type_is_si(domain))
2669 ret = intel_pasid_setup_pass_through(iommu, domain,
2670 dev, PASID_RID2PASID);
Lu Baoluddf09b62020-01-02 08:18:17 +08002671 else if (domain_use_first_level(domain))
2672 ret = domain_setup_first_level(iommu, domain, dev,
2673 PASID_RID2PASID);
Lu Baoluef848b72018-12-10 09:59:01 +08002674 else
2675 ret = intel_pasid_setup_second_level(iommu, domain,
2676 dev, PASID_RID2PASID);
2677 spin_unlock(&iommu->lock);
2678 if (ret) {
Bjorn Helgaas932a6522019-02-08 16:06:00 -06002679 dev_err(dev, "Setup RID2PASID failed\n");
Bjorn Helgaas71753232019-02-08 16:06:15 -06002680 dmar_remove_one_dev_info(dev);
Lu Baoluef848b72018-12-10 09:59:01 +08002681 return NULL;
Lu Baolua7fc93f2018-07-14 15:47:00 +08002682 }
2683 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002684
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002685 if (dev && domain_context_mapping(domain, dev)) {
Bjorn Helgaas932a6522019-02-08 16:06:00 -06002686 dev_err(dev, "Domain context map failed\n");
Bjorn Helgaas71753232019-02-08 16:06:15 -06002687 dmar_remove_one_dev_info(dev);
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002688 return NULL;
2689 }
2690
David Woodhouseb718cd32014-03-09 13:11:33 -07002691 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002692}
2693
David Woodhouseb2132032009-06-26 18:50:28 +01002694static int iommu_domain_identity_map(struct dmar_domain *domain,
Tom Murphye70b0812020-05-16 14:21:01 +08002695 unsigned long first_vpfn,
2696 unsigned long last_vpfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002697{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002698 /*
2699 * RMRR range might have overlap with physical memory range,
2700 * clear it first
2701 */
David Woodhousec5395d52009-06-28 16:35:56 +01002702 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002703
Peter Xu87684fd2018-05-04 10:34:53 +08002704 return __domain_mapping(domain, first_vpfn, NULL,
2705 first_vpfn, last_vpfn - first_vpfn + 1,
2706 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002707}
2708
Joerg Roedel301e7ee2019-07-22 16:21:05 +02002709static int md_domain_init(struct dmar_domain *domain, int guest_width);
2710
Matt Kraai071e1372009-08-23 22:30:22 -07002711static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002712{
Lu Baolu4de354e2019-05-25 13:41:27 +08002713 struct dmar_rmrr_unit *rmrr;
2714 struct device *dev;
2715 int i, nid, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002716
Jiang Liuab8dfe22014-07-11 14:19:27 +08002717 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002718 if (!si_domain)
2719 return -EFAULT;
2720
Joerg Roedel301e7ee2019-07-22 16:21:05 +02002721 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002722 domain_exit(si_domain);
2723 return -EFAULT;
2724 }
2725
David Woodhouse19943b02009-08-04 16:19:20 +01002726 if (hw)
2727 return 0;
2728
David Woodhousec7ab48d2009-06-26 19:10:36 +01002729 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002730 unsigned long start_pfn, end_pfn;
2731 int i;
2732
2733 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2734 ret = iommu_domain_identity_map(si_domain,
Tom Murphye70b0812020-05-16 14:21:01 +08002735 mm_to_dma_pfn(start_pfn),
2736 mm_to_dma_pfn(end_pfn));
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002737 if (ret)
2738 return ret;
2739 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002740 }
2741
Lu Baolu4de354e2019-05-25 13:41:27 +08002742 /*
Lu Baolu9235cb132020-01-15 11:03:58 +08002743 * Identity map the RMRRs so that devices with RMRRs could also use
2744 * the si_domain.
Lu Baolu4de354e2019-05-25 13:41:27 +08002745 */
2746 for_each_rmrr_units(rmrr) {
2747 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2748 i, dev) {
2749 unsigned long long start = rmrr->base_address;
2750 unsigned long long end = rmrr->end_address;
2751
Lu Baolu4de354e2019-05-25 13:41:27 +08002752 if (WARN_ON(end < start ||
2753 end >> agaw_to_width(si_domain->agaw)))
2754 continue;
2755
Lu Baolu48f0bcf2020-06-23 07:13:45 +08002756 ret = iommu_domain_identity_map(si_domain,
2757 mm_to_dma_pfn(start >> PAGE_SHIFT),
2758 mm_to_dma_pfn(end >> PAGE_SHIFT));
Lu Baolu4de354e2019-05-25 13:41:27 +08002759 if (ret)
2760 return ret;
2761 }
2762 }
2763
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002764 return 0;
2765}
2766
Joerg Roedel28ccce02015-07-21 14:45:31 +02002767static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002768{
David Woodhouse0ac72662014-03-09 13:19:22 -07002769 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002770 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002771 u8 bus, devfn;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002772
David Woodhouse5913c9b2014-03-09 16:27:31 -07002773 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002774 if (!iommu)
2775 return -ENODEV;
2776
Joerg Roedel5db31562015-07-22 12:40:43 +02002777 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002778 if (ndomain != domain)
2779 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002780
2781 return 0;
2782}
2783
David Woodhouse0b9d9752014-03-09 15:48:15 -07002784static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002785{
2786 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002787 struct device *tmp;
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002788 int i;
2789
Jiang Liu0e242612014-02-19 14:07:34 +08002790 rcu_read_lock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002791 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002792 /*
2793 * Return TRUE if this RMRR contains the device that
2794 * is passed in.
2795 */
2796 for_each_active_dev_scope(rmrr->devices,
2797 rmrr->devices_cnt, i, tmp)
Eric Augere143fd42019-06-03 08:53:33 +02002798 if (tmp == dev ||
2799 is_downstream_to_pci_bridge(dev, tmp)) {
Jiang Liu0e242612014-02-19 14:07:34 +08002800 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002801 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002802 }
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002803 }
Jiang Liu0e242612014-02-19 14:07:34 +08002804 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002805 return false;
2806}
2807
Eric Auger1c5c59f2019-06-03 08:53:36 +02002808/**
2809 * device_rmrr_is_relaxable - Test whether the RMRR of this device
2810 * is relaxable (ie. is allowed to be not enforced under some conditions)
2811 * @dev: device handle
2812 *
2813 * We assume that PCI USB devices with RMRRs have them largely
2814 * for historical reasons and that the RMRR space is not actively used post
2815 * boot. This exclusion may change if vendors begin to abuse it.
2816 *
2817 * The same exception is made for graphics devices, with the requirement that
2818 * any use of the RMRR regions will be torn down before assigning the device
2819 * to a guest.
2820 *
2821 * Return: true if the RMRR is relaxable, false otherwise
2822 */
2823static bool device_rmrr_is_relaxable(struct device *dev)
2824{
2825 struct pci_dev *pdev;
2826
2827 if (!dev_is_pci(dev))
2828 return false;
2829
2830 pdev = to_pci_dev(dev);
2831 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2832 return true;
2833 else
2834 return false;
2835}
2836
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002837/*
2838 * There are a couple cases where we need to restrict the functionality of
2839 * devices associated with RMRRs. The first is when evaluating a device for
2840 * identity mapping because problems exist when devices are moved in and out
2841 * of domains and their respective RMRR information is lost. This means that
2842 * a device with associated RMRRs will never be in a "passthrough" domain.
2843 * The second is use of the device through the IOMMU API. This interface
2844 * expects to have full control of the IOVA space for the device. We cannot
2845 * satisfy both the requirement that RMRR access is maintained and have an
2846 * unencumbered IOVA space. We also have no ability to quiesce the device's
2847 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2848 * We therefore prevent devices associated with an RMRR from participating in
2849 * the IOMMU API, which eliminates them from device assignment.
2850 *
Eric Auger1c5c59f2019-06-03 08:53:36 +02002851 * In both cases, devices which have relaxable RMRRs are not concerned by this
2852 * restriction. See device_rmrr_is_relaxable comment.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002853 */
2854static bool device_is_rmrr_locked(struct device *dev)
2855{
2856 if (!device_has_rmrr(dev))
2857 return false;
2858
Eric Auger1c5c59f2019-06-03 08:53:36 +02002859 if (device_rmrr_is_relaxable(dev))
2860 return false;
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002861
2862 return true;
2863}
2864
Lu Baoluf273a452019-05-25 13:41:26 +08002865/*
2866 * Return the required default domain type for a specific device.
2867 *
2868 * @dev: the device in query
2869 * @startup: true if this is during early boot
2870 *
2871 * Returns:
2872 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2873 * - IOMMU_DOMAIN_IDENTITY: device requires an identical mapping domain
2874 * - 0: both identity and dynamic domains work for this device
2875 */
Lu Baolu0e31a722019-05-25 13:41:34 +08002876static int device_def_domain_type(struct device *dev)
David Woodhouse6941af22009-07-04 18:24:27 +01002877{
David Woodhouse3bdb2592014-03-09 16:03:08 -07002878 if (dev_is_pci(dev)) {
2879 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002880
Lu Baolu89a60792018-10-23 15:45:01 +08002881 /*
2882 * Prevent any device marked as untrusted from getting
2883 * placed into the statically identity mapping domain.
2884 */
2885 if (pdev->untrusted)
Lu Baoluf273a452019-05-25 13:41:26 +08002886 return IOMMU_DOMAIN_DMA;
Lu Baolu89a60792018-10-23 15:45:01 +08002887
David Woodhouse3bdb2592014-03-09 16:03:08 -07002888 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
Lu Baoluf273a452019-05-25 13:41:26 +08002889 return IOMMU_DOMAIN_IDENTITY;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002890
David Woodhouse3bdb2592014-03-09 16:03:08 -07002891 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
Lu Baoluf273a452019-05-25 13:41:26 +08002892 return IOMMU_DOMAIN_IDENTITY;
David Woodhouse3bdb2592014-03-09 16:03:08 -07002893 }
David Woodhouse6941af22009-07-04 18:24:27 +01002894
Lu Baolub89b6602020-01-15 11:03:59 +08002895 return 0;
Lu Baoluf273a452019-05-25 13:41:26 +08002896}
2897
Jiang Liuffebeb42014-11-09 22:48:02 +08002898static void intel_iommu_init_qi(struct intel_iommu *iommu)
2899{
2900 /*
2901 * Start from the sane iommu hardware state.
2902 * If the queued invalidation is already initialized by us
2903 * (for example, while enabling interrupt-remapping) then
2904 * we got the things already rolling from a sane state.
2905 */
2906 if (!iommu->qi) {
2907 /*
2908 * Clear any previous faults.
2909 */
2910 dmar_fault(-1, iommu);
2911 /*
2912 * Disable queued invalidation if supported and already enabled
2913 * before OS handover.
2914 */
2915 dmar_disable_qi(iommu);
2916 }
2917
2918 if (dmar_enable_qi(iommu)) {
2919 /*
2920 * Queued Invalidate not enabled, use Register Based Invalidate
2921 */
2922 iommu->flush.flush_context = __iommu_flush_context;
2923 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002924 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08002925 iommu->name);
2926 } else {
2927 iommu->flush.flush_context = qi_flush_context;
2928 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002929 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08002930 }
2931}
2932
Joerg Roedel091d42e2015-06-12 11:56:10 +02002933static int copy_context_table(struct intel_iommu *iommu,
Dan Williamsdfddb962015-10-09 18:16:46 -04002934 struct root_entry *old_re,
Joerg Roedel091d42e2015-06-12 11:56:10 +02002935 struct context_entry **tbl,
2936 int bus, bool ext)
2937{
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002938 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002939 struct context_entry *new_ce = NULL, ce;
Dan Williamsdfddb962015-10-09 18:16:46 -04002940 struct context_entry *old_ce = NULL;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002941 struct root_entry re;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002942 phys_addr_t old_ce_phys;
2943
2944 tbl_idx = ext ? bus * 2 : bus;
Dan Williamsdfddb962015-10-09 18:16:46 -04002945 memcpy(&re, old_re, sizeof(re));
Joerg Roedel091d42e2015-06-12 11:56:10 +02002946
2947 for (devfn = 0; devfn < 256; devfn++) {
2948 /* First calculate the correct index */
2949 idx = (ext ? devfn * 2 : devfn) % 256;
2950
2951 if (idx == 0) {
2952 /* First save what we may have and clean up */
2953 if (new_ce) {
2954 tbl[tbl_idx] = new_ce;
2955 __iommu_flush_cache(iommu, new_ce,
2956 VTD_PAGE_SIZE);
2957 pos = 1;
2958 }
2959
2960 if (old_ce)
Pan Bian829383e2018-11-21 17:53:47 +08002961 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002962
2963 ret = 0;
2964 if (devfn < 0x80)
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002965 old_ce_phys = root_entry_lctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002966 else
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002967 old_ce_phys = root_entry_uctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002968
2969 if (!old_ce_phys) {
2970 if (ext && devfn == 0) {
2971 /* No LCTP, try UCTP */
2972 devfn = 0x7f;
2973 continue;
2974 } else {
2975 goto out;
2976 }
2977 }
2978
2979 ret = -ENOMEM;
Dan Williamsdfddb962015-10-09 18:16:46 -04002980 old_ce = memremap(old_ce_phys, PAGE_SIZE,
2981 MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002982 if (!old_ce)
2983 goto out;
2984
2985 new_ce = alloc_pgtable_page(iommu->node);
2986 if (!new_ce)
2987 goto out_unmap;
2988
2989 ret = 0;
2990 }
2991
2992 /* Now copy the context entry */
Dan Williamsdfddb962015-10-09 18:16:46 -04002993 memcpy(&ce, old_ce + idx, sizeof(ce));
Joerg Roedel091d42e2015-06-12 11:56:10 +02002994
Joerg Roedelcf484d02015-06-12 12:21:46 +02002995 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02002996 continue;
2997
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002998 did = context_domain_id(&ce);
2999 if (did >= 0 && did < cap_ndoms(iommu->cap))
3000 set_bit(did, iommu->domain_ids);
3001
Joerg Roedelcf484d02015-06-12 12:21:46 +02003002 /*
3003 * We need a marker for copied context entries. This
3004 * marker needs to work for the old format as well as
3005 * for extended context entries.
3006 *
3007 * Bit 67 of the context entry is used. In the old
3008 * format this bit is available to software, in the
3009 * extended format it is the PGE bit, but PGE is ignored
3010 * by HW if PASIDs are disabled (and thus still
3011 * available).
3012 *
3013 * So disable PASIDs first and then mark the entry
3014 * copied. This means that we don't copy PASID
3015 * translations from the old kernel, but this is fine as
3016 * faults there are not fatal.
3017 */
3018 context_clear_pasid_enable(&ce);
3019 context_set_copied(&ce);
3020
Joerg Roedel091d42e2015-06-12 11:56:10 +02003021 new_ce[idx] = ce;
3022 }
3023
3024 tbl[tbl_idx + pos] = new_ce;
3025
3026 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3027
3028out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003029 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003030
3031out:
3032 return ret;
3033}
3034
3035static int copy_translation_tables(struct intel_iommu *iommu)
3036{
3037 struct context_entry **ctxt_tbls;
Dan Williamsdfddb962015-10-09 18:16:46 -04003038 struct root_entry *old_rt;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003039 phys_addr_t old_rt_phys;
3040 int ctxt_table_entries;
3041 unsigned long flags;
3042 u64 rtaddr_reg;
3043 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02003044 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003045
3046 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3047 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02003048 new_ext = !!ecap_ecs(iommu->ecap);
3049
3050 /*
3051 * The RTT bit can only be changed when translation is disabled,
3052 * but disabling translation means to open a window for data
3053 * corruption. So bail out and don't copy anything if we would
3054 * have to change the bit.
3055 */
3056 if (new_ext != ext)
3057 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003058
3059 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3060 if (!old_rt_phys)
3061 return -EINVAL;
3062
Dan Williamsdfddb962015-10-09 18:16:46 -04003063 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003064 if (!old_rt)
3065 return -ENOMEM;
3066
3067 /* This is too big for the stack - allocate it from slab */
3068 ctxt_table_entries = ext ? 512 : 256;
3069 ret = -ENOMEM;
Kees Cook6396bb22018-06-12 14:03:40 -07003070 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003071 if (!ctxt_tbls)
3072 goto out_unmap;
3073
3074 for (bus = 0; bus < 256; bus++) {
3075 ret = copy_context_table(iommu, &old_rt[bus],
3076 ctxt_tbls, bus, ext);
3077 if (ret) {
3078 pr_err("%s: Failed to copy context table for bus %d\n",
3079 iommu->name, bus);
3080 continue;
3081 }
3082 }
3083
3084 spin_lock_irqsave(&iommu->lock, flags);
3085
3086 /* Context tables are copied, now write them to the root_entry table */
3087 for (bus = 0; bus < 256; bus++) {
3088 int idx = ext ? bus * 2 : bus;
3089 u64 val;
3090
3091 if (ctxt_tbls[idx]) {
3092 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3093 iommu->root_entry[bus].lo = val;
3094 }
3095
3096 if (!ext || !ctxt_tbls[idx + 1])
3097 continue;
3098
3099 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3100 iommu->root_entry[bus].hi = val;
3101 }
3102
3103 spin_unlock_irqrestore(&iommu->lock, flags);
3104
3105 kfree(ctxt_tbls);
3106
3107 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3108
3109 ret = 0;
3110
3111out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003112 memunmap(old_rt);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003113
3114 return ret;
3115}
3116
Jacob Pan33753032020-05-16 14:20:51 +08003117#ifdef CONFIG_INTEL_IOMMU_SVM
3118static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
3119{
3120 struct intel_iommu *iommu = data;
3121 ioasid_t ioasid;
3122
3123 if (!iommu)
3124 return INVALID_IOASID;
3125 /*
3126 * VT-d virtual command interface always uses the full 20 bit
3127 * PASID range. Host can partition guest PASID range based on
3128 * policies but it is out of guest's control.
3129 */
3130 if (min < PASID_MIN || max > intel_pasid_max_id)
3131 return INVALID_IOASID;
3132
3133 if (vcmd_alloc_pasid(iommu, &ioasid))
3134 return INVALID_IOASID;
3135
3136 return ioasid;
3137}
3138
3139static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
3140{
3141 struct intel_iommu *iommu = data;
3142
3143 if (!iommu)
3144 return;
3145 /*
3146 * Sanity check the ioasid owner is done at upper layer, e.g. VFIO
3147 * We can only free the PASID when all the devices are unbound.
3148 */
3149 if (ioasid_find(NULL, ioasid, NULL)) {
3150 pr_alert("Cannot free active IOASID %d\n", ioasid);
3151 return;
3152 }
3153 vcmd_free_pasid(iommu, ioasid);
3154}
3155
3156static void register_pasid_allocator(struct intel_iommu *iommu)
3157{
3158 /*
3159 * If we are running in the host, no need for custom allocator
3160 * in that PASIDs are allocated from the host system-wide.
3161 */
3162 if (!cap_caching_mode(iommu->cap))
3163 return;
3164
3165 if (!sm_supported(iommu)) {
3166 pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
3167 return;
3168 }
3169
3170 /*
3171 * Register a custom PASID allocator if we are running in a guest,
3172 * guest PASID must be obtained via virtual command interface.
3173 * There can be multiple vIOMMUs in each guest but only one allocator
3174 * is active. All vIOMMU allocators will eventually be calling the same
3175 * host allocator.
3176 */
3177 if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
3178 return;
3179
3180 pr_info("Register custom PASID allocator\n");
3181 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
3182 iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
3183 iommu->pasid_allocator.pdata = (void *)iommu;
3184 if (ioasid_register_allocator(&iommu->pasid_allocator)) {
3185 pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
3186 /*
3187 * Disable scalable mode on this IOMMU if there
3188 * is no custom allocator. Mixing SM capable vIOMMU
3189 * and non-SM vIOMMU are not supported.
3190 */
3191 intel_iommu_sm = 0;
3192 }
3193}
3194#endif
3195
Joseph Cihulab7792602011-05-03 00:08:37 -07003196static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003197{
3198 struct dmar_drhd_unit *drhd;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003199 struct intel_iommu *iommu;
Lu Baoludf4f3c62019-05-25 13:41:36 +08003200 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003201
3202 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003203 * for each drhd
3204 * allocate root
3205 * initialize and program root entry to not present
3206 * endfor
3207 */
3208 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003209 /*
3210 * lock not needed as this is only incremented in the single
3211 * threaded kernel __init code path all other access are read
3212 * only
3213 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003214 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003215 g_num_of_iommus++;
3216 continue;
3217 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003218 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003219 }
3220
Jiang Liuffebeb42014-11-09 22:48:02 +08003221 /* Preallocate enough resources for IOMMU hot-addition */
3222 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3223 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3224
Weidong Hand9630fe2008-12-08 11:06:32 +08003225 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3226 GFP_KERNEL);
3227 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003228 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003229 ret = -ENOMEM;
3230 goto error;
3231 }
3232
Lu Baolu6a8c6742019-06-12 08:28:47 +08003233 for_each_iommu(iommu, drhd) {
3234 if (drhd->ignored) {
3235 iommu_disable_translation(iommu);
3236 continue;
3237 }
3238
Lu Baolu56283172018-07-14 15:46:54 +08003239 /*
3240 * Find the max pasid size of all IOMMU's in the system.
3241 * We need to ensure the system pasid table is no bigger
3242 * than the smallest supported.
3243 */
Lu Baolu765b6a92018-12-10 09:58:55 +08003244 if (pasid_supported(iommu)) {
Lu Baolu56283172018-07-14 15:46:54 +08003245 u32 temp = 2 << ecap_pss(iommu->ecap);
3246
3247 intel_pasid_max_id = min_t(u32, temp,
3248 intel_pasid_max_id);
3249 }
3250
Weidong Hand9630fe2008-12-08 11:06:32 +08003251 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003252
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003253 intel_iommu_init_qi(iommu);
3254
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003255 ret = iommu_init_domains(iommu);
3256 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003257 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003258
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003259 init_translation_status(iommu);
3260
Joerg Roedel091d42e2015-06-12 11:56:10 +02003261 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3262 iommu_disable_translation(iommu);
3263 clear_translation_pre_enabled(iommu);
3264 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3265 iommu->name);
3266 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003267
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003268 /*
3269 * TBD:
3270 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003271 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003272 */
3273 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003274 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003275 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003276
Joerg Roedel091d42e2015-06-12 11:56:10 +02003277 if (translation_pre_enabled(iommu)) {
3278 pr_info("Translation already enabled - trying to copy translation structures\n");
3279
3280 ret = copy_translation_tables(iommu);
3281 if (ret) {
3282 /*
3283 * We found the IOMMU with translation
3284 * enabled - but failed to copy over the
3285 * old root-entry table. Try to proceed
3286 * by disabling translation now and
3287 * allocating a clean root-entry table.
3288 * This might cause DMAR faults, but
3289 * probably the dump will still succeed.
3290 */
3291 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3292 iommu->name);
3293 iommu_disable_translation(iommu);
3294 clear_translation_pre_enabled(iommu);
3295 } else {
3296 pr_info("Copied translation tables from previous kernel for %s\n",
3297 iommu->name);
3298 }
3299 }
3300
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003301 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003302 hw_pass_through = 0;
Jacob Panff3dc652020-01-02 08:18:03 +08003303 intel_svm_check(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003304 }
3305
Joerg Roedela4c34ff2016-06-17 11:29:48 +02003306 /*
3307 * Now that qi is enabled on all iommus, set the root entry and flush
3308 * caches. This is required on some Intel X58 chipsets, otherwise the
3309 * flush_context function will loop forever and the boot hangs.
3310 */
3311 for_each_active_iommu(iommu, drhd) {
3312 iommu_flush_write_buffer(iommu);
Jacob Pan33753032020-05-16 14:20:51 +08003313#ifdef CONFIG_INTEL_IOMMU_SVM
3314 register_pasid_allocator(iommu);
3315#endif
Joerg Roedela4c34ff2016-06-17 11:29:48 +02003316 iommu_set_root_entry(iommu);
3317 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3318 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3319 }
3320
Suresh Siddhad3f13812011-08-23 17:05:25 -07003321#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
Lu Baolu5daab582019-05-02 09:34:26 +08003322 dmar_map_gfx = 0;
David Woodhouse19943b02009-08-04 16:19:20 +01003323#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003324
Lu Baolu5daab582019-05-02 09:34:26 +08003325 if (!dmar_map_gfx)
3326 iommu_identity_mapping |= IDENTMAP_GFX;
3327
Ashok Raj21e722c2017-01-30 09:39:53 -08003328 check_tylersburg_isoch();
3329
Lu Baolu4de354e2019-05-25 13:41:27 +08003330 ret = si_domain_init(hw_pass_through);
3331 if (ret)
3332 goto free_iommu;
Joerg Roedel86080cc2015-06-12 12:27:16 +02003333
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003334 /*
3335 * for each drhd
3336 * enable fault log
3337 * global invalidate context cache
3338 * global invalidate iotlb
3339 * enable translation
3340 */
Jiang Liu7c919772014-01-06 14:18:18 +08003341 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003342 if (drhd->ignored) {
3343 /*
3344 * we always have to disable PMRs or DMA may fail on
3345 * this device
3346 */
3347 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003348 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003349 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003350 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003351
3352 iommu_flush_write_buffer(iommu);
3353
David Woodhousea222a7f2015-10-07 23:35:18 +01003354#ifdef CONFIG_INTEL_IOMMU_SVM
Lu Baolu765b6a92018-12-10 09:58:55 +08003355 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
Lu Baolua7755c32019-04-19 14:43:29 +08003356 /*
3357 * Call dmar_alloc_hwirq() with dmar_global_lock held,
3358 * could cause possible lock race condition.
3359 */
3360 up_write(&dmar_global_lock);
David Woodhousea222a7f2015-10-07 23:35:18 +01003361 ret = intel_svm_enable_prq(iommu);
Lu Baolua7755c32019-04-19 14:43:29 +08003362 down_write(&dmar_global_lock);
David Woodhousea222a7f2015-10-07 23:35:18 +01003363 if (ret)
3364 goto free_iommu;
3365 }
3366#endif
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003367 ret = dmar_set_interrupt(iommu);
3368 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003369 goto free_iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003370 }
3371
3372 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003373
3374free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003375 for_each_active_iommu(iommu, drhd) {
3376 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003377 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003378 }
Joerg Roedel13cf0172017-08-11 11:40:10 +02003379
Weidong Hand9630fe2008-12-08 11:06:32 +08003380 kfree(g_iommus);
Joerg Roedel13cf0172017-08-11 11:40:10 +02003381
Jiang Liu989d51f2014-02-19 14:07:21 +08003382error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003383 return ret;
3384}
3385
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003386/* This takes a number of _MM_ pages, not VTD pages */
Omer Peleg2aac6302016-04-20 11:33:57 +03003387static unsigned long intel_alloc_iova(struct device *dev,
David Woodhouse875764d2009-06-28 21:20:51 +01003388 struct dmar_domain *domain,
3389 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003390{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06003391 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003392
Lu Baolucb8b8922020-01-02 08:18:19 +08003393 /*
3394 * Restrict dma_mask to the width that the iommu can handle.
3395 * First-level translation restricts the input-address to a
3396 * canonical address (i.e., address bits 63:N have the same
3397 * value as address bit [N-1], where N is 48-bits with 4-level
3398 * paging and 57-bits with 5-level paging). Hence, skip bit
3399 * [N-1].
3400 */
3401 if (domain_use_first_level(domain))
3402 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
3403 dma_mask);
3404 else
3405 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
3406 dma_mask);
3407
Robin Murphy8f6429c2015-07-16 19:40:12 +01003408 /* Ensure we reserve the whole size-aligned region */
3409 nrpages = __roundup_pow_of_two(nrpages);
David Woodhouse875764d2009-06-28 21:20:51 +01003410
3411 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003412 /*
3413 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003414 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003415 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003416 */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003417 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003418 IOVA_PFN(DMA_BIT_MASK(32)), false);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003419 if (iova_pfn)
3420 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003421 }
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003422 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3423 IOVA_PFN(dma_mask), true);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003424 if (unlikely(!iova_pfn)) {
Qian Cai944c9172019-11-22 14:16:54 -05003425 dev_err_once(dev, "Allocating %ld-page iova failed\n",
3426 nrpages);
Omer Peleg2aac6302016-04-20 11:33:57 +03003427 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003428 }
3429
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003430 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003431}
3432
Logan Gunthorpe21d5d272019-01-22 14:30:45 -07003433static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3434 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003435{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003436 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003437 phys_addr_t start_paddr;
Omer Peleg2aac6302016-04-20 11:33:57 +03003438 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003439 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003440 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003441 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003442 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003443
3444 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003445
Lu Baolu6fc70202020-05-06 09:59:47 +08003446 if (unlikely(attach_deferred(dev)))
3447 do_deferred_attach(dev);
3448
Joerg Roedel96d170f2020-02-17 17:27:44 +01003449 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003450 if (!domain)
Christoph Hellwig524a6692018-11-21 19:34:10 +01003451 return DMA_MAPPING_ERROR;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003452
Weidong Han8c11e792008-12-08 15:29:22 +08003453 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003454 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003455
Omer Peleg2aac6302016-04-20 11:33:57 +03003456 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3457 if (!iova_pfn)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003458 goto error;
3459
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003460 /*
3461 * Check if DMAR supports zero-length reads on write only
3462 * mappings..
3463 */
3464 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003465 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003466 prot |= DMA_PTE_READ;
3467 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3468 prot |= DMA_PTE_WRITE;
3469 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003470 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003471 * page. Note: if two part of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003472 * might have two guest_addr mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003473 * is not a big problem
3474 */
Omer Peleg2aac6302016-04-20 11:33:57 +03003475 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003476 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003477 if (ret)
3478 goto error;
3479
Omer Peleg2aac6302016-04-20 11:33:57 +03003480 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
David Woodhouse03d6a242009-06-28 15:33:46 +01003481 start_paddr += paddr & ~PAGE_MASK;
Lu Baolu3b530342019-09-06 14:14:51 +08003482
3483 trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
3484
David Woodhouse03d6a242009-06-28 15:33:46 +01003485 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003486
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003487error:
Omer Peleg2aac6302016-04-20 11:33:57 +03003488 if (iova_pfn)
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003489 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
Bjorn Helgaas932a6522019-02-08 16:06:00 -06003490 dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3491 size, (unsigned long long)paddr, dir);
Christoph Hellwig524a6692018-11-21 19:34:10 +01003492 return DMA_MAPPING_ERROR;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003493}
3494
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003495static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3496 unsigned long offset, size_t size,
3497 enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003498 unsigned long attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003499{
Lu Baolu6fc70202020-05-06 09:59:47 +08003500 return __intel_map_single(dev, page_to_phys(page) + offset,
3501 size, dir, *dev->dma_mask);
Logan Gunthorpe21d5d272019-01-22 14:30:45 -07003502}
3503
3504static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3505 size_t size, enum dma_data_direction dir,
3506 unsigned long attrs)
3507{
Lu Baolu6fc70202020-05-06 09:59:47 +08003508 return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003509}
3510
Omer Peleg769530e2016-04-20 11:33:25 +03003511static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003512{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003513 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003514 unsigned long start_pfn, last_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003515 unsigned long nrpages;
Omer Peleg2aac6302016-04-20 11:33:57 +03003516 unsigned long iova_pfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003517 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003518 struct page *freelist;
Lu Baoluf7b0c4c2019-04-12 12:26:13 +08003519 struct pci_dev *pdev = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003520
David Woodhouse1525a292014-03-06 16:19:30 +00003521 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003522 BUG_ON(!domain);
3523
Weidong Han8c11e792008-12-08 15:29:22 +08003524 iommu = domain_get_iommu(domain);
3525
Omer Peleg2aac6302016-04-20 11:33:57 +03003526 iova_pfn = IOVA_PFN(dev_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003527
Omer Peleg769530e2016-04-20 11:33:25 +03003528 nrpages = aligned_nrpages(dev_addr, size);
Omer Peleg2aac6302016-04-20 11:33:57 +03003529 start_pfn = mm_to_dma_pfn(iova_pfn);
Omer Peleg769530e2016-04-20 11:33:25 +03003530 last_pfn = start_pfn + nrpages - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003531
Lu Baoluf7b0c4c2019-04-12 12:26:13 +08003532 if (dev_is_pci(dev))
3533 pdev = to_pci_dev(dev);
3534
David Woodhouseea8ea462014-03-05 17:09:32 +00003535 freelist = domain_unmap(domain, start_pfn, last_pfn);
Dmitry Safonoveffa4672019-07-16 22:38:05 +01003536 if (intel_iommu_strict || (pdev && pdev->untrusted) ||
3537 !has_iova_flush_queue(&domain->iovad)) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003538 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003539 nrpages, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003540 /* free iova */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003541 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
David Woodhouseea8ea462014-03-05 17:09:32 +00003542 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003543 } else {
Joerg Roedel13cf0172017-08-11 11:40:10 +02003544 queue_iova(&domain->iovad, iova_pfn, nrpages,
3545 (unsigned long)freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003546 /*
3547 * queue up the release of the unmap to save the 1/6th of the
3548 * cpu used up by the iotlb flush operation...
3549 */
mark gross5e0d2a62008-03-04 15:22:08 -08003550 }
Lu Baolu3b530342019-09-06 14:14:51 +08003551
3552 trace_unmap_single(dev, dev_addr, size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003553}
3554
Jiang Liud41a4ad2014-07-11 14:19:34 +08003555static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3556 size_t size, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003557 unsigned long attrs)
Jiang Liud41a4ad2014-07-11 14:19:34 +08003558{
Lu Baolu6fc70202020-05-06 09:59:47 +08003559 intel_unmap(dev, dev_addr, size);
Christoph Hellwig9cc0c2a2019-04-10 18:14:07 +02003560}
3561
3562static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3563 size_t size, enum dma_data_direction dir, unsigned long attrs)
3564{
Lu Baolu6fc70202020-05-06 09:59:47 +08003565 intel_unmap(dev, dev_addr, size);
Jiang Liud41a4ad2014-07-11 14:19:34 +08003566}
3567
David Woodhouse5040a912014-03-09 16:14:00 -07003568static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003569 dma_addr_t *dma_handle, gfp_t flags,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003570 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003571{
Christoph Hellwig7ec916f2018-07-05 13:29:55 -06003572 struct page *page = NULL;
3573 int order;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003574
Lu Baolu6fc70202020-05-06 09:59:47 +08003575 if (unlikely(attach_deferred(dev)))
3576 do_deferred_attach(dev);
Christoph Hellwig9cc0c2a2019-04-10 18:14:07 +02003577
Christoph Hellwig7ec916f2018-07-05 13:29:55 -06003578 size = PAGE_ALIGN(size);
3579 order = get_order(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003580
Christoph Hellwig7ec916f2018-07-05 13:29:55 -06003581 if (gfpflags_allow_blocking(flags)) {
3582 unsigned int count = size >> PAGE_SHIFT;
3583
Marek Szyprowskid834c5a2018-08-17 15:49:00 -07003584 page = dma_alloc_from_contiguous(dev, count, order,
3585 flags & __GFP_NOWARN);
Christoph Hellwig7ec916f2018-07-05 13:29:55 -06003586 }
3587
3588 if (!page)
3589 page = alloc_pages(flags, order);
3590 if (!page)
3591 return NULL;
3592 memset(page_address(page), 0, size);
3593
Logan Gunthorpe21d5d272019-01-22 14:30:45 -07003594 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3595 DMA_BIDIRECTIONAL,
3596 dev->coherent_dma_mask);
Christoph Hellwig524a6692018-11-21 19:34:10 +01003597 if (*dma_handle != DMA_MAPPING_ERROR)
Christoph Hellwig7ec916f2018-07-05 13:29:55 -06003598 return page_address(page);
3599 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3600 __free_pages(page, order);
3601
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003602 return NULL;
3603}
3604
David Woodhouse5040a912014-03-09 16:14:00 -07003605static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003606 dma_addr_t dma_handle, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003607{
Christoph Hellwig7ec916f2018-07-05 13:29:55 -06003608 int order;
3609 struct page *page = virt_to_page(vaddr);
3610
3611 size = PAGE_ALIGN(size);
3612 order = get_order(size);
3613
3614 intel_unmap(dev, dma_handle, size);
3615 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3616 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003617}
3618
David Woodhouse5040a912014-03-09 16:14:00 -07003619static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003620 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003621 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003622{
Omer Peleg769530e2016-04-20 11:33:25 +03003623 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3624 unsigned long nrpages = 0;
3625 struct scatterlist *sg;
3626 int i;
3627
3628 for_each_sg(sglist, sg, nelems, i) {
3629 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3630 }
3631
3632 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Lu Baolu3b530342019-09-06 14:14:51 +08003633
3634 trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003635}
3636
David Woodhouse5040a912014-03-09 16:14:00 -07003637static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003638 enum dma_data_direction dir, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003639{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003640 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003641 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003642 size_t size = 0;
3643 int prot = 0;
Omer Peleg2aac6302016-04-20 11:33:57 +03003644 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003645 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003646 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003647 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003648 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003649
3650 BUG_ON(dir == DMA_NONE);
Lu Baolu6fc70202020-05-06 09:59:47 +08003651
3652 if (unlikely(attach_deferred(dev)))
3653 do_deferred_attach(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003654
Joerg Roedel96d170f2020-02-17 17:27:44 +01003655 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003656 if (!domain)
3657 return 0;
3658
Weidong Han8c11e792008-12-08 15:29:22 +08003659 iommu = domain_get_iommu(domain);
3660
David Woodhouseb536d242009-06-28 14:49:31 +01003661 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003662 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003663
Omer Peleg2aac6302016-04-20 11:33:57 +03003664 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
David Woodhouse5040a912014-03-09 16:14:00 -07003665 *dev->dma_mask);
Omer Peleg2aac6302016-04-20 11:33:57 +03003666 if (!iova_pfn) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003667 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003668 return 0;
3669 }
3670
3671 /*
3672 * Check if DMAR supports zero-length reads on write only
3673 * mappings..
3674 */
3675 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003676 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003677 prot |= DMA_PTE_READ;
3678 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3679 prot |= DMA_PTE_WRITE;
3680
Omer Peleg2aac6302016-04-20 11:33:57 +03003681 start_vpfn = mm_to_dma_pfn(iova_pfn);
David Woodhousee1605492009-06-29 11:17:38 +01003682
Fenghua Yuf5329592009-08-04 15:09:37 -07003683 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003684 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003685 dma_pte_free_pagetable(domain, start_vpfn,
David Dillowbc24c572017-06-28 19:42:23 -07003686 start_vpfn + size - 1,
3687 agaw_to_level(domain->agaw) + 1);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003688 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
David Woodhousee1605492009-06-29 11:17:38 +01003689 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003690 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003691
Lu Baolu984d03a2020-01-02 08:18:11 +08003692 for_each_sg(sglist, sg, nelems, i)
3693 trace_map_sg(dev, i + 1, nelems, sg);
Lu Baolu3b530342019-09-06 14:14:51 +08003694
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003695 return nelems;
3696}
3697
Arvind Sankar9c24eaf2019-10-08 10:33:57 -04003698static u64 intel_get_required_mask(struct device *dev)
3699{
Arvind Sankar9c24eaf2019-10-08 10:33:57 -04003700 return DMA_BIT_MASK(32);
3701}
3702
Christoph Hellwig02b4da52018-09-17 19:10:31 +02003703static const struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003704 .alloc = intel_alloc_coherent,
3705 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003706 .map_sg = intel_map_sg,
3707 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003708 .map_page = intel_map_page,
3709 .unmap_page = intel_unmap_page,
Logan Gunthorpe21d5d272019-01-22 14:30:45 -07003710 .map_resource = intel_map_resource,
Christoph Hellwig9cc0c2a2019-04-10 18:14:07 +02003711 .unmap_resource = intel_unmap_resource,
Christoph Hellwigfec777c2018-03-19 11:38:15 +01003712 .dma_supported = dma_direct_supported,
Christoph Hellwigf9f32322019-08-06 15:01:50 +03003713 .mmap = dma_common_mmap,
3714 .get_sgtable = dma_common_get_sgtable,
Arvind Sankar9c24eaf2019-10-08 10:33:57 -04003715 .get_required_mask = intel_get_required_mask,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003716};
3717
Lu Baolucfb94a32019-09-06 14:14:52 +08003718static void
3719bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
3720 enum dma_data_direction dir, enum dma_sync_target target)
3721{
3722 struct dmar_domain *domain;
3723 phys_addr_t tlb_addr;
3724
3725 domain = find_domain(dev);
3726 if (WARN_ON(!domain))
3727 return;
3728
3729 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
3730 if (is_swiotlb_buffer(tlb_addr))
3731 swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
3732}
3733
3734static dma_addr_t
3735bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
3736 enum dma_data_direction dir, unsigned long attrs,
3737 u64 dma_mask)
3738{
3739 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3740 struct dmar_domain *domain;
3741 struct intel_iommu *iommu;
3742 unsigned long iova_pfn;
3743 unsigned long nrpages;
3744 phys_addr_t tlb_addr;
3745 int prot = 0;
3746 int ret;
3747
Joerg Roedela11bfde2020-02-17 17:20:59 +01003748 if (unlikely(attach_deferred(dev)))
3749 do_deferred_attach(dev);
3750
Joerg Roedel96d170f2020-02-17 17:27:44 +01003751 domain = find_domain(dev);
Joerg Roedela11bfde2020-02-17 17:20:59 +01003752
Lu Baolucfb94a32019-09-06 14:14:52 +08003753 if (WARN_ON(dir == DMA_NONE || !domain))
3754 return DMA_MAPPING_ERROR;
3755
3756 iommu = domain_get_iommu(domain);
3757 if (WARN_ON(!iommu))
3758 return DMA_MAPPING_ERROR;
3759
3760 nrpages = aligned_nrpages(0, size);
3761 iova_pfn = intel_alloc_iova(dev, domain,
3762 dma_to_mm_pfn(nrpages), dma_mask);
3763 if (!iova_pfn)
3764 return DMA_MAPPING_ERROR;
3765
3766 /*
3767 * Check if DMAR supports zero-length reads on write only
3768 * mappings..
3769 */
3770 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3771 !cap_zlr(iommu->cap))
3772 prot |= DMA_PTE_READ;
3773 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3774 prot |= DMA_PTE_WRITE;
3775
3776 /*
3777 * If both the physical buffer start address and size are
3778 * page aligned, we don't need to use a bounce page.
3779 */
3780 if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
3781 tlb_addr = swiotlb_tbl_map_single(dev,
3782 __phys_to_dma(dev, io_tlb_start),
3783 paddr, size, aligned_size, dir, attrs);
3784 if (tlb_addr == DMA_MAPPING_ERROR) {
3785 goto swiotlb_error;
3786 } else {
3787 /* Cleanup the padding area. */
3788 void *padding_start = phys_to_virt(tlb_addr);
3789 size_t padding_size = aligned_size;
3790
3791 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
3792 (dir == DMA_TO_DEVICE ||
3793 dir == DMA_BIDIRECTIONAL)) {
3794 padding_start += size;
3795 padding_size -= size;
3796 }
3797
3798 memset(padding_start, 0, padding_size);
3799 }
3800 } else {
3801 tlb_addr = paddr;
3802 }
3803
3804 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3805 tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
3806 if (ret)
3807 goto mapping_error;
3808
3809 trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
3810
3811 return (phys_addr_t)iova_pfn << PAGE_SHIFT;
3812
3813mapping_error:
3814 if (is_swiotlb_buffer(tlb_addr))
3815 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3816 aligned_size, dir, attrs);
3817swiotlb_error:
3818 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3819 dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
3820 size, (unsigned long long)paddr, dir);
3821
3822 return DMA_MAPPING_ERROR;
3823}
3824
3825static void
3826bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
3827 enum dma_data_direction dir, unsigned long attrs)
3828{
3829 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3830 struct dmar_domain *domain;
3831 phys_addr_t tlb_addr;
3832
3833 domain = find_domain(dev);
3834 if (WARN_ON(!domain))
3835 return;
3836
3837 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
3838 if (WARN_ON(!tlb_addr))
3839 return;
3840
3841 intel_unmap(dev, dev_addr, size);
3842 if (is_swiotlb_buffer(tlb_addr))
3843 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3844 aligned_size, dir, attrs);
3845
3846 trace_bounce_unmap_single(dev, dev_addr, size);
3847}
3848
3849static dma_addr_t
3850bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
3851 size_t size, enum dma_data_direction dir, unsigned long attrs)
3852{
3853 return bounce_map_single(dev, page_to_phys(page) + offset,
3854 size, dir, attrs, *dev->dma_mask);
3855}
3856
3857static dma_addr_t
3858bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3859 enum dma_data_direction dir, unsigned long attrs)
3860{
3861 return bounce_map_single(dev, phys_addr, size,
3862 dir, attrs, *dev->dma_mask);
3863}
3864
3865static void
3866bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
3867 enum dma_data_direction dir, unsigned long attrs)
3868{
3869 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
3870}
3871
3872static void
3873bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
3874 enum dma_data_direction dir, unsigned long attrs)
3875{
3876 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
3877}
3878
3879static void
3880bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3881 enum dma_data_direction dir, unsigned long attrs)
3882{
3883 struct scatterlist *sg;
3884 int i;
3885
3886 for_each_sg(sglist, sg, nelems, i)
3887 bounce_unmap_page(dev, sg->dma_address,
3888 sg_dma_len(sg), dir, attrs);
3889}
3890
3891static int
3892bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3893 enum dma_data_direction dir, unsigned long attrs)
3894{
3895 int i;
3896 struct scatterlist *sg;
3897
3898 for_each_sg(sglist, sg, nelems, i) {
3899 sg->dma_address = bounce_map_page(dev, sg_page(sg),
3900 sg->offset, sg->length,
3901 dir, attrs);
3902 if (sg->dma_address == DMA_MAPPING_ERROR)
3903 goto out_unmap;
3904 sg_dma_len(sg) = sg->length;
3905 }
3906
Lu Baolu984d03a2020-01-02 08:18:11 +08003907 for_each_sg(sglist, sg, nelems, i)
3908 trace_bounce_map_sg(dev, i + 1, nelems, sg);
3909
Lu Baolucfb94a32019-09-06 14:14:52 +08003910 return nelems;
3911
3912out_unmap:
3913 bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
3914 return 0;
3915}
3916
3917static void
3918bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
3919 size_t size, enum dma_data_direction dir)
3920{
3921 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
3922}
3923
3924static void
3925bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
3926 size_t size, enum dma_data_direction dir)
3927{
3928 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
3929}
3930
3931static void
3932bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
3933 int nelems, enum dma_data_direction dir)
3934{
3935 struct scatterlist *sg;
3936 int i;
3937
3938 for_each_sg(sglist, sg, nelems, i)
3939 bounce_sync_single(dev, sg_dma_address(sg),
3940 sg_dma_len(sg), dir, SYNC_FOR_CPU);
3941}
3942
3943static void
3944bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
3945 int nelems, enum dma_data_direction dir)
3946{
3947 struct scatterlist *sg;
3948 int i;
3949
3950 for_each_sg(sglist, sg, nelems, i)
3951 bounce_sync_single(dev, sg_dma_address(sg),
3952 sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
3953}
3954
3955static const struct dma_map_ops bounce_dma_ops = {
3956 .alloc = intel_alloc_coherent,
3957 .free = intel_free_coherent,
3958 .map_sg = bounce_map_sg,
3959 .unmap_sg = bounce_unmap_sg,
3960 .map_page = bounce_map_page,
3961 .unmap_page = bounce_unmap_page,
3962 .sync_single_for_cpu = bounce_sync_single_for_cpu,
3963 .sync_single_for_device = bounce_sync_single_for_device,
3964 .sync_sg_for_cpu = bounce_sync_sg_for_cpu,
3965 .sync_sg_for_device = bounce_sync_sg_for_device,
3966 .map_resource = bounce_map_resource,
3967 .unmap_resource = bounce_unmap_resource,
3968 .dma_supported = dma_direct_supported,
3969};
3970
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003971static inline int iommu_domain_cache_init(void)
3972{
3973 int ret = 0;
3974
3975 iommu_domain_cache = kmem_cache_create("iommu_domain",
3976 sizeof(struct dmar_domain),
3977 0,
3978 SLAB_HWCACHE_ALIGN,
3979
3980 NULL);
3981 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003982 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003983 ret = -ENOMEM;
3984 }
3985
3986 return ret;
3987}
3988
3989static inline int iommu_devinfo_cache_init(void)
3990{
3991 int ret = 0;
3992
3993 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3994 sizeof(struct device_domain_info),
3995 0,
3996 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003997 NULL);
3998 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003999 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004000 ret = -ENOMEM;
4001 }
4002
4003 return ret;
4004}
4005
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004006static int __init iommu_init_mempool(void)
4007{
4008 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004009 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004010 if (ret)
4011 return ret;
4012
4013 ret = iommu_domain_cache_init();
4014 if (ret)
4015 goto domain_error;
4016
4017 ret = iommu_devinfo_cache_init();
4018 if (!ret)
4019 return ret;
4020
4021 kmem_cache_destroy(iommu_domain_cache);
4022domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004023 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004024
4025 return -ENOMEM;
4026}
4027
4028static void __init iommu_exit_mempool(void)
4029{
4030 kmem_cache_destroy(iommu_devinfo_cache);
4031 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004032 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004033}
4034
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004035static void __init init_no_remapping_devices(void)
4036{
4037 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00004038 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08004039 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004040
4041 for_each_drhd_unit(drhd) {
4042 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08004043 for_each_active_dev_scope(drhd->devices,
4044 drhd->devices_cnt, i, dev)
4045 break;
David Woodhouse832bd852014-03-07 15:08:36 +00004046 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004047 if (i == drhd->devices_cnt)
4048 drhd->ignored = 1;
4049 }
4050 }
4051
Jiang Liu7c919772014-01-06 14:18:18 +08004052 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08004053 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004054 continue;
4055
Jiang Liub683b232014-02-19 14:07:32 +08004056 for_each_active_dev_scope(drhd->devices,
4057 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004058 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004059 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004060 if (i < drhd->devices_cnt)
4061 continue;
4062
David Woodhousec0771df2011-10-14 20:59:46 +01004063 /* This IOMMU has *only* gfx devices. Either bypass it or
4064 set the gfx_mapped flag, as appropriate */
Lu Baolub1012ca2020-07-23 09:34:37 +08004065 drhd->gfx_dedicated = 1;
Lu Baolu2d33b7d2020-09-03 14:51:32 +08004066 if (!dmar_map_gfx)
David Woodhousec0771df2011-10-14 20:59:46 +01004067 drhd->ignored = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004068 }
4069}
4070
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004071#ifdef CONFIG_SUSPEND
4072static int init_iommu_hw(void)
4073{
4074 struct dmar_drhd_unit *drhd;
4075 struct intel_iommu *iommu = NULL;
4076
4077 for_each_active_iommu(iommu, drhd)
4078 if (iommu->qi)
4079 dmar_reenable_qi(iommu);
4080
Joseph Cihulab7792602011-05-03 00:08:37 -07004081 for_each_iommu(iommu, drhd) {
4082 if (drhd->ignored) {
4083 /*
4084 * we always have to disable PMRs or DMA may fail on
4085 * this device
4086 */
4087 if (force_on)
4088 iommu_disable_protect_mem_regions(iommu);
4089 continue;
4090 }
Lu Baolu095303e2019-04-29 09:16:02 +08004091
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004092 iommu_flush_write_buffer(iommu);
4093
4094 iommu_set_root_entry(iommu);
4095
4096 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004097 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08004098 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4099 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07004100 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004101 }
4102
4103 return 0;
4104}
4105
4106static void iommu_flush_all(void)
4107{
4108 struct dmar_drhd_unit *drhd;
4109 struct intel_iommu *iommu;
4110
4111 for_each_active_iommu(iommu, drhd) {
4112 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004113 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004114 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004115 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004116 }
4117}
4118
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004119static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004120{
4121 struct dmar_drhd_unit *drhd;
4122 struct intel_iommu *iommu = NULL;
4123 unsigned long flag;
4124
4125 for_each_active_iommu(iommu, drhd) {
Kees Cook6396bb22018-06-12 14:03:40 -07004126 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004127 GFP_ATOMIC);
4128 if (!iommu->iommu_state)
4129 goto nomem;
4130 }
4131
4132 iommu_flush_all();
4133
4134 for_each_active_iommu(iommu, drhd) {
4135 iommu_disable_translation(iommu);
4136
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004137 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004138
4139 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4140 readl(iommu->reg + DMAR_FECTL_REG);
4141 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4142 readl(iommu->reg + DMAR_FEDATA_REG);
4143 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4144 readl(iommu->reg + DMAR_FEADDR_REG);
4145 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4146 readl(iommu->reg + DMAR_FEUADDR_REG);
4147
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004148 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004149 }
4150 return 0;
4151
4152nomem:
4153 for_each_active_iommu(iommu, drhd)
4154 kfree(iommu->iommu_state);
4155
4156 return -ENOMEM;
4157}
4158
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004159static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004160{
4161 struct dmar_drhd_unit *drhd;
4162 struct intel_iommu *iommu = NULL;
4163 unsigned long flag;
4164
4165 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004166 if (force_on)
4167 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4168 else
4169 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004170 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004171 }
4172
4173 for_each_active_iommu(iommu, drhd) {
4174
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004175 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004176
4177 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4178 iommu->reg + DMAR_FECTL_REG);
4179 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4180 iommu->reg + DMAR_FEDATA_REG);
4181 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4182 iommu->reg + DMAR_FEADDR_REG);
4183 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4184 iommu->reg + DMAR_FEUADDR_REG);
4185
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004186 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004187 }
4188
4189 for_each_active_iommu(iommu, drhd)
4190 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004191}
4192
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004193static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004194 .resume = iommu_resume,
4195 .suspend = iommu_suspend,
4196};
4197
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004198static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004199{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004200 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004201}
4202
4203#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004204static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004205#endif /* CONFIG_PM */
4206
Barret Rhodence4cc52b2020-01-15 11:03:57 +08004207static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
4208{
4209 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
4210 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
4211 rmrr->end_address <= rmrr->base_address ||
4212 arch_rmrr_sanity_check(rmrr))
4213 return -EINVAL;
4214
4215 return 0;
4216}
4217
Jiang Liuc2a0b532014-11-09 22:47:56 +08004218int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004219{
4220 struct acpi_dmar_reserved_memory *rmrr;
4221 struct dmar_rmrr_unit *rmrru;
Yian Chenf036c7f2019-10-17 04:39:19 -07004222
4223 rmrr = (struct acpi_dmar_reserved_memory *)header;
Hans de Goede96788c72020-03-09 15:01:38 +01004224 if (rmrr_sanity_check(rmrr)) {
4225 pr_warn(FW_BUG
Barret Rhodenf5a68bb2020-01-15 11:03:56 +08004226 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
4227 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4228 rmrr->base_address, rmrr->end_address,
4229 dmi_get_system_info(DMI_BIOS_VENDOR),
4230 dmi_get_system_info(DMI_BIOS_VERSION),
4231 dmi_get_system_info(DMI_PRODUCT_VERSION));
Hans de Goede96788c72020-03-09 15:01:38 +01004232 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
4233 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004234
4235 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4236 if (!rmrru)
Eric Auger0659b8d2017-01-19 20:57:53 +00004237 goto out;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004238
4239 rmrru->hdr = header;
Yian Chenf036c7f2019-10-17 04:39:19 -07004240
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004241 rmrru->base_address = rmrr->base_address;
4242 rmrru->end_address = rmrr->end_address;
Eric Auger0659b8d2017-01-19 20:57:53 +00004243
Jiang Liu2e455282014-02-19 14:07:36 +08004244 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4245 ((void *)rmrr) + rmrr->header.length,
4246 &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004247 if (rmrru->devices_cnt && rmrru->devices == NULL)
Eric Auger5f64ce52019-06-03 08:53:31 +02004248 goto free_rmrru;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004249
Jiang Liu2e455282014-02-19 14:07:36 +08004250 list_add(&rmrru->list, &dmar_rmrr_units);
4251
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004252 return 0;
Eric Auger0659b8d2017-01-19 20:57:53 +00004253free_rmrru:
4254 kfree(rmrru);
4255out:
4256 return -ENOMEM;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004257}
4258
Jiang Liu6b197242014-11-09 22:47:58 +08004259static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4260{
4261 struct dmar_atsr_unit *atsru;
4262 struct acpi_dmar_atsr *tmp;
4263
Qian Caic6f4ebd2020-03-17 11:03:26 -04004264 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
4265 dmar_rcu_check()) {
Jiang Liu6b197242014-11-09 22:47:58 +08004266 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4267 if (atsr->segment != tmp->segment)
4268 continue;
4269 if (atsr->header.length != tmp->header.length)
4270 continue;
4271 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4272 return atsru;
4273 }
4274
4275 return NULL;
4276}
4277
4278int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004279{
4280 struct acpi_dmar_atsr *atsr;
4281 struct dmar_atsr_unit *atsru;
4282
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004283 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
Jiang Liu6b197242014-11-09 22:47:58 +08004284 return 0;
4285
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004286 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004287 atsru = dmar_find_atsr(atsr);
4288 if (atsru)
4289 return 0;
4290
4291 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004292 if (!atsru)
4293 return -ENOMEM;
4294
Jiang Liu6b197242014-11-09 22:47:58 +08004295 /*
4296 * If memory is allocated from slab by ACPI _DSM method, we need to
4297 * copy the memory content because the memory buffer will be freed
4298 * on return.
4299 */
4300 atsru->hdr = (void *)(atsru + 1);
4301 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004302 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004303 if (!atsru->include_all) {
4304 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4305 (void *)atsr + atsr->header.length,
4306 &atsru->devices_cnt);
4307 if (atsru->devices_cnt && atsru->devices == NULL) {
4308 kfree(atsru);
4309 return -ENOMEM;
4310 }
4311 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004312
Jiang Liu0e242612014-02-19 14:07:34 +08004313 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004314
4315 return 0;
4316}
4317
Jiang Liu9bdc5312014-01-06 14:18:27 +08004318static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4319{
4320 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4321 kfree(atsru);
4322}
4323
Jiang Liu6b197242014-11-09 22:47:58 +08004324int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4325{
4326 struct acpi_dmar_atsr *atsr;
4327 struct dmar_atsr_unit *atsru;
4328
4329 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4330 atsru = dmar_find_atsr(atsr);
4331 if (atsru) {
4332 list_del_rcu(&atsru->list);
4333 synchronize_rcu();
4334 intel_iommu_free_atsr(atsru);
4335 }
4336
4337 return 0;
4338}
4339
4340int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4341{
4342 int i;
4343 struct device *dev;
4344 struct acpi_dmar_atsr *atsr;
4345 struct dmar_atsr_unit *atsru;
4346
4347 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4348 atsru = dmar_find_atsr(atsr);
4349 if (!atsru)
4350 return 0;
4351
Linus Torvalds194dc872016-07-27 20:03:31 -07004352 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08004353 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4354 i, dev)
4355 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07004356 }
Jiang Liu6b197242014-11-09 22:47:58 +08004357
4358 return 0;
4359}
4360
Jiang Liuffebeb42014-11-09 22:48:02 +08004361static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4362{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06004363 int sp, ret;
Jiang Liuffebeb42014-11-09 22:48:02 +08004364 struct intel_iommu *iommu = dmaru->iommu;
4365
4366 if (g_iommus[iommu->seq_id])
4367 return 0;
4368
4369 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004370 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004371 iommu->name);
4372 return -ENXIO;
4373 }
4374 if (!ecap_sc_support(iommu->ecap) &&
4375 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004376 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004377 iommu->name);
4378 return -ENXIO;
4379 }
Lu Baolu64229e82020-01-02 08:18:20 +08004380 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
Jiang Liuffebeb42014-11-09 22:48:02 +08004381 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004382 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004383 iommu->name);
4384 return -ENXIO;
4385 }
4386
4387 /*
4388 * Disable translation if already enabled prior to OS handover.
4389 */
4390 if (iommu->gcmd & DMA_GCMD_TE)
4391 iommu_disable_translation(iommu);
4392
4393 g_iommus[iommu->seq_id] = iommu;
4394 ret = iommu_init_domains(iommu);
4395 if (ret == 0)
4396 ret = iommu_alloc_root_entry(iommu);
4397 if (ret)
4398 goto out;
4399
Jacob Panff3dc652020-01-02 08:18:03 +08004400 intel_svm_check(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00004401
Jiang Liuffebeb42014-11-09 22:48:02 +08004402 if (dmaru->ignored) {
4403 /*
4404 * we always have to disable PMRs or DMA may fail on this device
4405 */
4406 if (force_on)
4407 iommu_disable_protect_mem_regions(iommu);
4408 return 0;
4409 }
4410
4411 intel_iommu_init_qi(iommu);
4412 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004413
4414#ifdef CONFIG_INTEL_IOMMU_SVM
Lu Baolu765b6a92018-12-10 09:58:55 +08004415 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
David Woodhousea222a7f2015-10-07 23:35:18 +01004416 ret = intel_svm_enable_prq(iommu);
4417 if (ret)
4418 goto disable_iommu;
4419 }
4420#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004421 ret = dmar_set_interrupt(iommu);
4422 if (ret)
4423 goto disable_iommu;
4424
4425 iommu_set_root_entry(iommu);
4426 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4427 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4428 iommu_enable_translation(iommu);
4429
Jiang Liuffebeb42014-11-09 22:48:02 +08004430 iommu_disable_protect_mem_regions(iommu);
4431 return 0;
4432
4433disable_iommu:
4434 disable_dmar_iommu(iommu);
4435out:
4436 free_dmar_iommu(iommu);
4437 return ret;
4438}
4439
Jiang Liu6b197242014-11-09 22:47:58 +08004440int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4441{
Jiang Liuffebeb42014-11-09 22:48:02 +08004442 int ret = 0;
4443 struct intel_iommu *iommu = dmaru->iommu;
4444
4445 if (!intel_iommu_enabled)
4446 return 0;
4447 if (iommu == NULL)
4448 return -EINVAL;
4449
4450 if (insert) {
4451 ret = intel_iommu_add(dmaru);
4452 } else {
4453 disable_dmar_iommu(iommu);
4454 free_dmar_iommu(iommu);
4455 }
4456
4457 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004458}
4459
Jiang Liu9bdc5312014-01-06 14:18:27 +08004460static void intel_iommu_free_dmars(void)
4461{
4462 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4463 struct dmar_atsr_unit *atsru, *atsr_n;
4464
4465 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4466 list_del(&rmrru->list);
4467 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4468 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004469 }
4470
Jiang Liu9bdc5312014-01-06 14:18:27 +08004471 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4472 list_del(&atsru->list);
4473 intel_iommu_free_atsr(atsru);
4474 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004475}
4476
4477int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4478{
Jiang Liub683b232014-02-19 14:07:32 +08004479 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004480 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004481 struct pci_dev *bridge = NULL;
4482 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004483 struct acpi_dmar_atsr *atsr;
4484 struct dmar_atsr_unit *atsru;
4485
4486 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004487 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004488 bridge = bus->self;
David Woodhoused14053b32015-10-15 09:28:06 +01004489 /* If it's an integrated device, allow ATS */
4490 if (!bridge)
4491 return 1;
4492 /* Connected via non-PCIe: no ATS */
4493 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004494 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004495 return 0;
David Woodhoused14053b32015-10-15 09:28:06 +01004496 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004497 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004498 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004499 }
4500
Jiang Liu0e242612014-02-19 14:07:34 +08004501 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004502 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4503 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4504 if (atsr->segment != pci_domain_nr(dev->bus))
4505 continue;
4506
Jiang Liub683b232014-02-19 14:07:32 +08004507 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004508 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004509 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004510
4511 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004512 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004513 }
Jiang Liub683b232014-02-19 14:07:32 +08004514 ret = 0;
4515out:
Jiang Liu0e242612014-02-19 14:07:34 +08004516 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004517
Jiang Liub683b232014-02-19 14:07:32 +08004518 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004519}
4520
Jiang Liu59ce0512014-02-19 14:07:35 +08004521int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4522{
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06004523 int ret;
Jiang Liu59ce0512014-02-19 14:07:35 +08004524 struct dmar_rmrr_unit *rmrru;
4525 struct dmar_atsr_unit *atsru;
4526 struct acpi_dmar_atsr *atsr;
4527 struct acpi_dmar_reserved_memory *rmrr;
4528
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004529 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
Jiang Liu59ce0512014-02-19 14:07:35 +08004530 return 0;
4531
4532 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4533 rmrr = container_of(rmrru->hdr,
4534 struct acpi_dmar_reserved_memory, header);
4535 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4536 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4537 ((void *)rmrr) + rmrr->header.length,
4538 rmrr->segment, rmrru->devices,
4539 rmrru->devices_cnt);
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06004540 if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004541 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004542 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004543 dmar_remove_dev_scope(info, rmrr->segment,
4544 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004545 }
4546 }
4547
4548 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4549 if (atsru->include_all)
4550 continue;
4551
4552 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4553 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4554 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4555 (void *)atsr + atsr->header.length,
4556 atsr->segment, atsru->devices,
4557 atsru->devices_cnt);
4558 if (ret > 0)
4559 break;
Bjorn Helgaase083ea5b2019-02-08 16:06:08 -06004560 else if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004561 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004562 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004563 if (dmar_remove_dev_scope(info, atsr->segment,
4564 atsru->devices, atsru->devices_cnt))
4565 break;
4566 }
4567 }
4568
4569 return 0;
4570}
4571
Jiang Liu75f05562014-02-19 14:07:37 +08004572static int intel_iommu_memory_notifier(struct notifier_block *nb,
4573 unsigned long val, void *v)
4574{
4575 struct memory_notify *mhp = v;
Tom Murphye70b0812020-05-16 14:21:01 +08004576 unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4577 unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
4578 mhp->nr_pages - 1);
Jiang Liu75f05562014-02-19 14:07:37 +08004579
4580 switch (val) {
4581 case MEM_GOING_ONLINE:
Tom Murphye70b0812020-05-16 14:21:01 +08004582 if (iommu_domain_identity_map(si_domain,
4583 start_vpfn, last_vpfn)) {
4584 pr_warn("Failed to build identity map for [%lx-%lx]\n",
4585 start_vpfn, last_vpfn);
Jiang Liu75f05562014-02-19 14:07:37 +08004586 return NOTIFY_BAD;
4587 }
4588 break;
4589
4590 case MEM_OFFLINE:
4591 case MEM_CANCEL_ONLINE:
Tom Murphye70b0812020-05-16 14:21:01 +08004592 {
Jiang Liu75f05562014-02-19 14:07:37 +08004593 struct dmar_drhd_unit *drhd;
4594 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004595 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004596
Tom Murphye70b0812020-05-16 14:21:01 +08004597 freelist = domain_unmap(si_domain,
4598 start_vpfn, last_vpfn);
David Woodhouseea8ea462014-03-05 17:09:32 +00004599
Jiang Liu75f05562014-02-19 14:07:37 +08004600 rcu_read_lock();
4601 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004602 iommu_flush_iotlb_psi(iommu, si_domain,
Tom Murphye70b0812020-05-16 14:21:01 +08004603 start_vpfn, mhp->nr_pages,
David Woodhouseea8ea462014-03-05 17:09:32 +00004604 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004605 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004606 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004607 }
4608 break;
4609 }
4610
4611 return NOTIFY_OK;
4612}
4613
4614static struct notifier_block intel_iommu_memory_nb = {
4615 .notifier_call = intel_iommu_memory_notifier,
4616 .priority = 0
4617};
4618
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004619static void free_all_cpu_cached_iovas(unsigned int cpu)
4620{
4621 int i;
4622
4623 for (i = 0; i < g_num_of_iommus; i++) {
4624 struct intel_iommu *iommu = g_iommus[i];
4625 struct dmar_domain *domain;
Aaron Campbell0caa7612016-07-02 21:23:24 -03004626 int did;
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004627
4628 if (!iommu)
4629 continue;
4630
Jan Niehusmann3bd4f912016-06-06 14:20:11 +02004631 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
Aaron Campbell0caa7612016-07-02 21:23:24 -03004632 domain = get_iommu_domain(iommu, (u16)did);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004633
Tom Murphye70b0812020-05-16 14:21:01 +08004634 if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004635 continue;
Tom Murphye70b0812020-05-16 14:21:01 +08004636
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004637 free_cpu_cached_iovas(cpu, &domain->iovad);
4638 }
4639 }
4640}
4641
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004642static int intel_iommu_cpu_dead(unsigned int cpu)
Omer Pelegaa473242016-04-20 11:33:02 +03004643{
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004644 free_all_cpu_cached_iovas(cpu);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004645 return 0;
Omer Pelegaa473242016-04-20 11:33:02 +03004646}
4647
Joerg Roedel161b28a2017-03-28 17:04:52 +02004648static void intel_disable_iommus(void)
4649{
4650 struct intel_iommu *iommu = NULL;
4651 struct dmar_drhd_unit *drhd;
4652
4653 for_each_iommu(iommu, drhd)
4654 iommu_disable_translation(iommu);
4655}
4656
Deepa Dinamani6c3a44e2019-11-10 09:27:44 -08004657void intel_iommu_shutdown(void)
4658{
4659 struct dmar_drhd_unit *drhd;
4660 struct intel_iommu *iommu = NULL;
4661
4662 if (no_iommu || dmar_disabled)
4663 return;
4664
4665 down_write(&dmar_global_lock);
4666
4667 /* Disable PMRs explicitly here. */
4668 for_each_iommu(iommu, drhd)
4669 iommu_disable_protect_mem_regions(iommu);
4670
4671 /* Make sure the IOMMUs are switched off */
4672 intel_disable_iommus();
4673
4674 up_write(&dmar_global_lock);
4675}
4676
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004677static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4678{
Joerg Roedel2926a2aa2017-08-14 17:19:26 +02004679 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4680
4681 return container_of(iommu_dev, struct intel_iommu, iommu);
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004682}
4683
Alex Williamsona5459cf2014-06-12 16:12:31 -06004684static ssize_t intel_iommu_show_version(struct device *dev,
4685 struct device_attribute *attr,
4686 char *buf)
4687{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004688 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004689 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4690 return sprintf(buf, "%d:%d\n",
4691 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4692}
4693static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4694
4695static ssize_t intel_iommu_show_address(struct device *dev,
4696 struct device_attribute *attr,
4697 char *buf)
4698{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004699 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004700 return sprintf(buf, "%llx\n", iommu->reg_phys);
4701}
4702static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4703
4704static ssize_t intel_iommu_show_cap(struct device *dev,
4705 struct device_attribute *attr,
4706 char *buf)
4707{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004708 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004709 return sprintf(buf, "%llx\n", iommu->cap);
4710}
4711static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4712
4713static ssize_t intel_iommu_show_ecap(struct device *dev,
4714 struct device_attribute *attr,
4715 char *buf)
4716{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004717 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004718 return sprintf(buf, "%llx\n", iommu->ecap);
4719}
4720static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4721
Alex Williamson2238c082015-07-14 15:24:53 -06004722static ssize_t intel_iommu_show_ndoms(struct device *dev,
4723 struct device_attribute *attr,
4724 char *buf)
4725{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004726 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004727 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4728}
4729static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4730
4731static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4732 struct device_attribute *attr,
4733 char *buf)
4734{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004735 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004736 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4737 cap_ndoms(iommu->cap)));
4738}
4739static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4740
Alex Williamsona5459cf2014-06-12 16:12:31 -06004741static struct attribute *intel_iommu_attrs[] = {
4742 &dev_attr_version.attr,
4743 &dev_attr_address.attr,
4744 &dev_attr_cap.attr,
4745 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004746 &dev_attr_domains_supported.attr,
4747 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004748 NULL,
4749};
4750
4751static struct attribute_group intel_iommu_group = {
4752 .name = "intel-iommu",
4753 .attrs = intel_iommu_attrs,
4754};
4755
4756const struct attribute_group *intel_iommu_groups[] = {
4757 &intel_iommu_group,
4758 NULL,
4759};
4760
Rajat Jain99b50be2020-07-07 15:46:03 -07004761static inline bool has_external_pci(void)
Lu Baolu89a60792018-10-23 15:45:01 +08004762{
4763 struct pci_dev *pdev = NULL;
Lu Baolu89a60792018-10-23 15:45:01 +08004764
Lu Baoluc5a5dc42019-09-06 14:14:50 +08004765 for_each_pci_dev(pdev)
Rajat Jain99b50be2020-07-07 15:46:03 -07004766 if (pdev->external_facing)
Lu Baoluc5a5dc42019-09-06 14:14:50 +08004767 return true;
Lu Baolu89a60792018-10-23 15:45:01 +08004768
Lu Baoluc5a5dc42019-09-06 14:14:50 +08004769 return false;
4770}
Lu Baolu89a60792018-10-23 15:45:01 +08004771
Lu Baoluc5a5dc42019-09-06 14:14:50 +08004772static int __init platform_optin_force_iommu(void)
4773{
Rajat Jain99b50be2020-07-07 15:46:03 -07004774 if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
Lu Baolu89a60792018-10-23 15:45:01 +08004775 return 0;
4776
4777 if (no_iommu || dmar_disabled)
4778 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4779
4780 /*
4781 * If Intel-IOMMU is disabled by default, we will apply identity
4782 * map for all devices except those marked as being untrusted.
4783 */
4784 if (dmar_disabled)
Lu Baolub89b6602020-01-15 11:03:59 +08004785 iommu_set_default_passthrough(false);
Lu Baolu89a60792018-10-23 15:45:01 +08004786
4787 dmar_disabled = 0;
Lu Baolu89a60792018-10-23 15:45:01 +08004788 no_iommu = 0;
4789
4790 return 1;
4791}
4792
Lu Baolufa212a92019-05-25 13:41:31 +08004793static int __init probe_acpi_namespace_devices(void)
4794{
4795 struct dmar_drhd_unit *drhd;
Qian Caiaf88ec32019-06-03 10:05:19 -04004796 /* To avoid a -Wunused-but-set-variable warning. */
4797 struct intel_iommu *iommu __maybe_unused;
Lu Baolufa212a92019-05-25 13:41:31 +08004798 struct device *dev;
4799 int i, ret = 0;
4800
4801 for_each_active_iommu(iommu, drhd) {
4802 for_each_active_dev_scope(drhd->devices,
4803 drhd->devices_cnt, i, dev) {
4804 struct acpi_device_physical_node *pn;
4805 struct iommu_group *group;
4806 struct acpi_device *adev;
4807
4808 if (dev->bus != &acpi_bus_type)
4809 continue;
4810
4811 adev = to_acpi_device(dev);
4812 mutex_lock(&adev->physical_node_lock);
4813 list_for_each_entry(pn,
4814 &adev->physical_node_list, node) {
4815 group = iommu_group_get(pn->dev);
4816 if (group) {
4817 iommu_group_put(group);
4818 continue;
4819 }
4820
4821 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4822 ret = iommu_probe_device(pn->dev);
4823 if (ret)
4824 break;
4825 }
4826 mutex_unlock(&adev->physical_node_lock);
4827
4828 if (ret)
4829 return ret;
4830 }
4831 }
4832
4833 return 0;
4834}
4835
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004836int __init intel_iommu_init(void)
4837{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004838 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004839 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004840 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004841
Lu Baolu89a60792018-10-23 15:45:01 +08004842 /*
4843 * Intel IOMMU is required for a TXT/tboot launch or platform
4844 * opt in, so enforce that.
4845 */
4846 force_on = tboot_force_iommu() || platform_optin_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004847
Jiang Liu3a5670e2014-02-19 14:07:33 +08004848 if (iommu_init_mempool()) {
4849 if (force_on)
4850 panic("tboot: Failed to initialize iommu memory\n");
4851 return -ENOMEM;
4852 }
4853
4854 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004855 if (dmar_table_init()) {
4856 if (force_on)
4857 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004858 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004859 }
4860
Suresh Siddhac2c72862011-08-23 17:05:19 -07004861 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004862 if (force_on)
4863 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004864 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004865 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004866
Joerg Roedelec154bf2017-10-06 15:00:53 +02004867 up_write(&dmar_global_lock);
4868
4869 /*
4870 * The bus notifier takes the dmar_global_lock, so lockdep will
4871 * complain later when we register it under the lock.
4872 */
4873 dmar_register_bus_notifier();
4874
4875 down_write(&dmar_global_lock);
4876
Megha Dey1da83472020-03-14 11:39:59 +08004877 if (!no_iommu)
4878 intel_iommu_debugfs_init();
4879
Joerg Roedel161b28a2017-03-28 17:04:52 +02004880 if (no_iommu || dmar_disabled) {
4881 /*
Shaohua Libfd20f12017-04-26 09:18:35 -07004882 * We exit the function here to ensure IOMMU's remapping and
4883 * mempool aren't setup, which means that the IOMMU's PMRs
4884 * won't be disabled via the call to init_dmars(). So disable
4885 * it explicitly here. The PMRs were setup by tboot prior to
4886 * calling SENTER, but the kernel is expected to reset/tear
4887 * down the PMRs.
4888 */
4889 if (intel_iommu_tboot_noforce) {
4890 for_each_iommu(iommu, drhd)
4891 iommu_disable_protect_mem_regions(iommu);
4892 }
4893
4894 /*
Joerg Roedel161b28a2017-03-28 17:04:52 +02004895 * Make sure the IOMMUs are switched off, even when we
4896 * boot into a kexec kernel and the previous kernel left
4897 * them enabled
4898 */
4899 intel_disable_iommus();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004900 goto out_free_dmar;
Joerg Roedel161b28a2017-03-28 17:04:52 +02004901 }
Suresh Siddha2ae21012008-07-10 11:16:43 -07004902
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004903 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004904 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004905
4906 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004907 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004908
Joseph Cihula51a63e62011-03-21 11:04:24 -07004909 if (dmar_init_reserved_ranges()) {
4910 if (force_on)
4911 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004912 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004913 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004914
Lu Baolucf1ec452019-05-02 09:34:25 +08004915 if (dmar_map_gfx)
4916 intel_iommu_gfx_mapped = 1;
4917
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004918 init_no_remapping_devices();
4919
Joseph Cihulab7792602011-05-03 00:08:37 -07004920 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004921 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004922 if (force_on)
4923 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004924 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004925 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004926 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004927 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004928
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004929 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004930
Qian Cai2d48ea02020-03-05 15:00:46 -05004931 down_read(&dmar_global_lock);
Joerg Roedel39ab9552017-02-01 16:56:46 +01004932 for_each_active_iommu(iommu, drhd) {
4933 iommu_device_sysfs_add(&iommu->iommu, NULL,
4934 intel_iommu_groups,
4935 "%s", iommu->name);
4936 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4937 iommu_device_register(&iommu->iommu);
4938 }
Qian Cai2d48ea02020-03-05 15:00:46 -05004939 up_read(&dmar_global_lock);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004940
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004941 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Jiang Liu75f05562014-02-19 14:07:37 +08004942 if (si_domain && !hw_pass_through)
4943 register_memory_notifier(&intel_iommu_memory_nb);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004944 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4945 intel_iommu_cpu_dead);
Lu Baolud8190dc2019-05-25 13:41:25 +08004946
Lu Baolud5692d42019-06-12 08:28:49 +08004947 down_read(&dmar_global_lock);
Lu Baolufa212a92019-05-25 13:41:31 +08004948 if (probe_acpi_namespace_devices())
4949 pr_warn("ACPI name space devices didn't probe correctly\n");
4950
Lu Baolud8190dc2019-05-25 13:41:25 +08004951 /* Finally, we enable the DMA remapping hardware. */
4952 for_each_iommu(iommu, drhd) {
Lu Baolu6a8c6742019-06-12 08:28:47 +08004953 if (!drhd->ignored && !translation_pre_enabled(iommu))
Lu Baolud8190dc2019-05-25 13:41:25 +08004954 iommu_enable_translation(iommu);
4955
4956 iommu_disable_protect_mem_regions(iommu);
4957 }
Qian Cai2d48ea02020-03-05 15:00:46 -05004958 up_read(&dmar_global_lock);
4959
Lu Baolud8190dc2019-05-25 13:41:25 +08004960 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4961
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004962 intel_iommu_enabled = 1;
4963
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004964 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004965
4966out_free_reserved_range:
4967 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004968out_free_dmar:
4969 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004970 up_write(&dmar_global_lock);
4971 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004972 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004973}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004974
Lu Baolu0ce4a852019-08-26 16:50:56 +08004975static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4976{
4977 struct intel_iommu *iommu = opaque;
4978
4979 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4980 return 0;
4981}
4982
4983/*
4984 * NB - intel-iommu lacks any sort of reference counting for the users of
4985 * dependent devices. If multiple endpoints have intersecting dependent
4986 * devices, unbinding the driver from any one of them will possibly leave
4987 * the others unable to operate.
4988 */
4989static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4990{
4991 if (!iommu || !dev || !dev_is_pci(dev))
4992 return;
4993
4994 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4995}
4996
Joerg Roedel127c7612015-07-23 17:44:46 +02004997static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004998{
Lu Baolu942067f2019-05-25 13:41:29 +08004999 struct dmar_domain *domain;
Weidong Hanc7151a82008-12-08 22:51:37 +08005000 struct intel_iommu *iommu;
5001 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08005002
Joerg Roedel55d94042015-07-22 16:50:40 +02005003 assert_spin_locked(&device_domain_lock);
5004
Joerg Roedelb608ac32015-07-21 18:19:08 +02005005 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08005006 return;
5007
Joerg Roedel127c7612015-07-23 17:44:46 +02005008 iommu = info->iommu;
Lu Baolu942067f2019-05-25 13:41:29 +08005009 domain = info->domain;
Joerg Roedel127c7612015-07-23 17:44:46 +02005010
5011 if (info->dev) {
Lu Baoluef848b72018-12-10 09:59:01 +08005012 if (dev_is_pci(info->dev) && sm_supported(iommu))
5013 intel_pasid_tear_down_entry(iommu, info->dev,
Lu Baolu37e91bd2020-05-16 14:20:57 +08005014 PASID_RID2PASID, false);
Lu Baoluef848b72018-12-10 09:59:01 +08005015
Joerg Roedel127c7612015-07-23 17:44:46 +02005016 iommu_disable_dev_iotlb(info);
Jon Derrick8038bdb2020-05-27 10:56:15 -06005017 if (!dev_is_real_dma_subdevice(info->dev))
5018 domain_context_clear(iommu, info->dev);
Lu Baolua7fc93f2018-07-14 15:47:00 +08005019 intel_pasid_free_table(info->dev);
Joerg Roedel127c7612015-07-23 17:44:46 +02005020 }
5021
Joerg Roedelb608ac32015-07-21 18:19:08 +02005022 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07005023
Joerg Roedeld160aca2015-07-22 11:52:53 +02005024 spin_lock_irqsave(&iommu->lock, flags);
Lu Baolu942067f2019-05-25 13:41:29 +08005025 domain_detach_iommu(domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02005026 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02005027
5028 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08005029}
5030
Bjorn Helgaas71753232019-02-08 16:06:15 -06005031static void dmar_remove_one_dev_info(struct device *dev)
Joerg Roedel55d94042015-07-22 16:50:40 +02005032{
Joerg Roedel127c7612015-07-23 17:44:46 +02005033 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02005034 unsigned long flags;
5035
Weidong Hanc7151a82008-12-08 22:51:37 +08005036 spin_lock_irqsave(&device_domain_lock, flags);
Lu Baolue85bb992020-05-16 14:20:52 +08005037 info = get_domain_info(dev);
5038 if (info)
Lu Baoluae23bfb62019-08-06 08:14:08 +08005039 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08005040 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08005041}
5042
Joerg Roedel301e7ee2019-07-22 16:21:05 +02005043static int md_domain_init(struct dmar_domain *domain, int guest_width)
5044{
5045 int adjust_width;
5046
Joerg Roedel301e7ee2019-07-22 16:21:05 +02005047 /* calculate AGAW */
5048 domain->gaw = guest_width;
5049 adjust_width = guestwidth_to_adjustwidth(guest_width);
5050 domain->agaw = width_to_agaw(adjust_width);
5051
5052 domain->iommu_coherency = 0;
5053 domain->iommu_snooping = 0;
5054 domain->iommu_superpage = 0;
5055 domain->max_addr = 0;
5056
5057 /* always allocate the top pgd */
5058 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
5059 if (!domain->pgd)
5060 return -ENOMEM;
5061 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
5062 return 0;
5063}
5064
Tom Murphye70b0812020-05-16 14:21:01 +08005065static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
5066{
5067 init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
5068 copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
5069
5070 if (!intel_iommu_strict &&
5071 init_iova_flush_queue(&dmar_domain->iovad,
5072 iommu_flush_iova, iova_entry_free))
5073 pr_info("iova flush queue initialization failed\n");
5074}
5075
Joerg Roedel00a77de2015-03-26 13:43:08 +01005076static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03005077{
Joerg Roedel5d450802008-12-03 14:52:32 +01005078 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01005079 struct iommu_domain *domain;
5080
Lu Baolu4de354e2019-05-25 13:41:27 +08005081 switch (type) {
Lu Baolufa954e62019-05-25 13:41:28 +08005082 case IOMMU_DOMAIN_DMA:
Lu Baolu4de354e2019-05-25 13:41:27 +08005083 case IOMMU_DOMAIN_UNMANAGED:
Lu Baolufa954e62019-05-25 13:41:28 +08005084 dmar_domain = alloc_domain(0);
Lu Baolu4de354e2019-05-25 13:41:27 +08005085 if (!dmar_domain) {
5086 pr_err("Can't allocate dmar_domain\n");
5087 return NULL;
5088 }
Joerg Roedel301e7ee2019-07-22 16:21:05 +02005089 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Lu Baolu4de354e2019-05-25 13:41:27 +08005090 pr_err("Domain initialization failed\n");
5091 domain_exit(dmar_domain);
5092 return NULL;
5093 }
Lu Baolufa954e62019-05-25 13:41:28 +08005094
Tom Murphye70b0812020-05-16 14:21:01 +08005095 if (type == IOMMU_DOMAIN_DMA)
5096 intel_init_iova_domain(dmar_domain);
Lu Baolufa954e62019-05-25 13:41:28 +08005097
Lu Baolu4de354e2019-05-25 13:41:27 +08005098 domain_update_iommu_cap(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03005099
Lu Baolu4de354e2019-05-25 13:41:27 +08005100 domain = &dmar_domain->domain;
5101 domain->geometry.aperture_start = 0;
5102 domain->geometry.aperture_end =
5103 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5104 domain->geometry.force_aperture = true;
5105
5106 return domain;
5107 case IOMMU_DOMAIN_IDENTITY:
5108 return &si_domain->domain;
5109 default:
Joerg Roedel00a77de2015-03-26 13:43:08 +01005110 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03005111 }
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005112
Lu Baolu4de354e2019-05-25 13:41:27 +08005113 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03005114}
Kay, Allen M38717942008-09-09 18:37:29 +03005115
Joerg Roedel00a77de2015-03-26 13:43:08 +01005116static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03005117{
Lu Baolu4de354e2019-05-25 13:41:27 +08005118 if (domain != &si_domain->domain)
5119 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03005120}
Kay, Allen M38717942008-09-09 18:37:29 +03005121
Lu Baolu67b8e022019-03-25 09:30:32 +08005122/*
5123 * Check whether a @domain could be attached to the @dev through the
5124 * aux-domain attach/detach APIs.
5125 */
5126static inline bool
5127is_aux_domain(struct device *dev, struct iommu_domain *domain)
5128{
Lu Baolue85bb992020-05-16 14:20:52 +08005129 struct device_domain_info *info = get_domain_info(dev);
Lu Baolu67b8e022019-03-25 09:30:32 +08005130
5131 return info && info->auxd_enabled &&
5132 domain->type == IOMMU_DOMAIN_UNMANAGED;
5133}
5134
5135static void auxiliary_link_device(struct dmar_domain *domain,
5136 struct device *dev)
5137{
Lu Baolue85bb992020-05-16 14:20:52 +08005138 struct device_domain_info *info = get_domain_info(dev);
Lu Baolu67b8e022019-03-25 09:30:32 +08005139
5140 assert_spin_locked(&device_domain_lock);
5141 if (WARN_ON(!info))
5142 return;
5143
5144 domain->auxd_refcnt++;
5145 list_add(&domain->auxd, &info->auxiliary_domains);
5146}
5147
5148static void auxiliary_unlink_device(struct dmar_domain *domain,
5149 struct device *dev)
5150{
Lu Baolue85bb992020-05-16 14:20:52 +08005151 struct device_domain_info *info = get_domain_info(dev);
Lu Baolu67b8e022019-03-25 09:30:32 +08005152
5153 assert_spin_locked(&device_domain_lock);
5154 if (WARN_ON(!info))
5155 return;
5156
5157 list_del(&domain->auxd);
5158 domain->auxd_refcnt--;
5159
5160 if (!domain->auxd_refcnt && domain->default_pasid > 0)
Jacob Pan59a62332020-01-02 08:18:08 +08005161 ioasid_free(domain->default_pasid);
Lu Baolu67b8e022019-03-25 09:30:32 +08005162}
5163
5164static int aux_domain_add_dev(struct dmar_domain *domain,
5165 struct device *dev)
5166{
5167 int ret;
Lu Baolu67b8e022019-03-25 09:30:32 +08005168 unsigned long flags;
5169 struct intel_iommu *iommu;
5170
Lu Baoludd6692f2020-07-24 09:49:21 +08005171 iommu = device_to_iommu(dev, NULL, NULL);
Lu Baolu67b8e022019-03-25 09:30:32 +08005172 if (!iommu)
5173 return -ENODEV;
5174
5175 if (domain->default_pasid <= 0) {
Fenghua Yuc7b6bac2020-09-15 09:30:05 -07005176 u32 pasid;
Lu Baolu67b8e022019-03-25 09:30:32 +08005177
Jacob Pan59a62332020-01-02 08:18:08 +08005178 /* No private data needed for the default pasid */
5179 pasid = ioasid_alloc(NULL, PASID_MIN,
5180 pci_max_pasids(to_pci_dev(dev)) - 1,
5181 NULL);
5182 if (pasid == INVALID_IOASID) {
Lu Baolu67b8e022019-03-25 09:30:32 +08005183 pr_err("Can't allocate default pasid\n");
5184 return -ENODEV;
5185 }
5186 domain->default_pasid = pasid;
5187 }
5188
5189 spin_lock_irqsave(&device_domain_lock, flags);
5190 /*
5191 * iommu->lock must be held to attach domain to iommu and setup the
5192 * pasid entry for second level translation.
5193 */
5194 spin_lock(&iommu->lock);
5195 ret = domain_attach_iommu(domain, iommu);
5196 if (ret)
5197 goto attach_failed;
5198
5199 /* Setup the PASID entry for mediated devices: */
Lu Baoluddf09b62020-01-02 08:18:17 +08005200 if (domain_use_first_level(domain))
5201 ret = domain_setup_first_level(iommu, domain, dev,
5202 domain->default_pasid);
5203 else
5204 ret = intel_pasid_setup_second_level(iommu, domain, dev,
5205 domain->default_pasid);
Lu Baolu67b8e022019-03-25 09:30:32 +08005206 if (ret)
5207 goto table_failed;
5208 spin_unlock(&iommu->lock);
5209
5210 auxiliary_link_device(domain, dev);
5211
5212 spin_unlock_irqrestore(&device_domain_lock, flags);
5213
5214 return 0;
5215
5216table_failed:
5217 domain_detach_iommu(domain, iommu);
5218attach_failed:
5219 spin_unlock(&iommu->lock);
5220 spin_unlock_irqrestore(&device_domain_lock, flags);
5221 if (!domain->auxd_refcnt && domain->default_pasid > 0)
Jacob Pan59a62332020-01-02 08:18:08 +08005222 ioasid_free(domain->default_pasid);
Lu Baolu67b8e022019-03-25 09:30:32 +08005223
5224 return ret;
5225}
5226
5227static void aux_domain_remove_dev(struct dmar_domain *domain,
5228 struct device *dev)
5229{
5230 struct device_domain_info *info;
5231 struct intel_iommu *iommu;
5232 unsigned long flags;
5233
5234 if (!is_aux_domain(dev, &domain->domain))
5235 return;
5236
5237 spin_lock_irqsave(&device_domain_lock, flags);
Lu Baolue85bb992020-05-16 14:20:52 +08005238 info = get_domain_info(dev);
Lu Baolu67b8e022019-03-25 09:30:32 +08005239 iommu = info->iommu;
5240
5241 auxiliary_unlink_device(domain, dev);
5242
5243 spin_lock(&iommu->lock);
Lu Baolu37e91bd2020-05-16 14:20:57 +08005244 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
Lu Baolu67b8e022019-03-25 09:30:32 +08005245 domain_detach_iommu(domain, iommu);
5246 spin_unlock(&iommu->lock);
5247
5248 spin_unlock_irqrestore(&device_domain_lock, flags);
5249}
5250
Lu Baolu8cc3759a2019-03-25 09:30:31 +08005251static int prepare_domain_attach_device(struct iommu_domain *domain,
5252 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03005253{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005254 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005255 struct intel_iommu *iommu;
5256 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03005257
Lu Baoludd6692f2020-07-24 09:49:21 +08005258 iommu = device_to_iommu(dev, NULL, NULL);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005259 if (!iommu)
5260 return -ENODEV;
5261
5262 /* check if this iommu agaw is sufficient for max mapped address */
5263 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01005264 if (addr_width > cap_mgaw(iommu->cap))
5265 addr_width = cap_mgaw(iommu->cap);
5266
5267 if (dmar_domain->max_addr > (1LL << addr_width)) {
Bjorn Helgaas932a6522019-02-08 16:06:00 -06005268 dev_err(dev, "%s: iommu width (%d) is not "
5269 "sufficient for the mapped address (%llx)\n",
5270 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005271 return -EFAULT;
5272 }
Tom Lyona99c47a2010-05-17 08:20:45 +01005273 dmar_domain->gaw = addr_width;
5274
5275 /*
5276 * Knock out extra levels of page tables if necessary
5277 */
5278 while (iommu->agaw < dmar_domain->agaw) {
5279 struct dma_pte *pte;
5280
5281 pte = dmar_domain->pgd;
5282 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08005283 dmar_domain->pgd = (struct dma_pte *)
5284 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01005285 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01005286 }
5287 dmar_domain->agaw--;
5288 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005289
Lu Baolu8cc3759a2019-03-25 09:30:31 +08005290 return 0;
5291}
5292
5293static int intel_iommu_attach_device(struct iommu_domain *domain,
5294 struct device *dev)
5295{
5296 int ret;
5297
Lu Baolu56795822019-06-12 08:28:48 +08005298 if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
5299 device_is_rmrr_locked(dev)) {
Lu Baolu8cc3759a2019-03-25 09:30:31 +08005300 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5301 return -EPERM;
5302 }
5303
Lu Baolu67b8e022019-03-25 09:30:32 +08005304 if (is_aux_domain(dev, domain))
5305 return -EPERM;
5306
Lu Baolu8cc3759a2019-03-25 09:30:31 +08005307 /* normally dev is not mapped */
5308 if (unlikely(domain_context_mapped(dev))) {
5309 struct dmar_domain *old_domain;
5310
5311 old_domain = find_domain(dev);
Lu Baolufa954e62019-05-25 13:41:28 +08005312 if (old_domain)
Lu Baolu8cc3759a2019-03-25 09:30:31 +08005313 dmar_remove_one_dev_info(dev);
Lu Baolu8cc3759a2019-03-25 09:30:31 +08005314 }
5315
5316 ret = prepare_domain_attach_device(domain, dev);
5317 if (ret)
5318 return ret;
5319
5320 return domain_add_dev_info(to_dmar_domain(domain), dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005321}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005322
Lu Baolu67b8e022019-03-25 09:30:32 +08005323static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
5324 struct device *dev)
5325{
5326 int ret;
5327
5328 if (!is_aux_domain(dev, domain))
5329 return -EPERM;
5330
5331 ret = prepare_domain_attach_device(domain, dev);
5332 if (ret)
5333 return ret;
5334
5335 return aux_domain_add_dev(to_dmar_domain(domain), dev);
5336}
5337
Joerg Roedel4c5478c2008-12-03 14:58:24 +01005338static void intel_iommu_detach_device(struct iommu_domain *domain,
5339 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03005340{
Bjorn Helgaas71753232019-02-08 16:06:15 -06005341 dmar_remove_one_dev_info(dev);
Kay, Allen M38717942008-09-09 18:37:29 +03005342}
Kay, Allen M38717942008-09-09 18:37:29 +03005343
Lu Baolu67b8e022019-03-25 09:30:32 +08005344static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
5345 struct device *dev)
5346{
5347 aux_domain_remove_dev(to_dmar_domain(domain), dev);
5348}
5349
Jacob Pan6ee1b772020-05-16 14:20:49 +08005350/*
5351 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
5352 * VT-d granularity. Invalidation is typically included in the unmap operation
5353 * as a result of DMA or VFIO unmap. However, for assigned devices guest
5354 * owns the first level page tables. Invalidations of translation caches in the
5355 * guest are trapped and passed down to the host.
5356 *
5357 * vIOMMU in the guest will only expose first level page tables, therefore
5358 * we do not support IOTLB granularity for request without PASID (second level).
5359 *
5360 * For example, to find the VT-d granularity encoding for IOTLB
5361 * type and page selective granularity within PASID:
5362 * X: indexed by iommu cache type
5363 * Y: indexed by enum iommu_inv_granularity
5364 * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
5365 */
5366
Qian Cai7809c4d2020-05-21 17:50:30 -04005367static const int
Jacob Pan6ee1b772020-05-16 14:20:49 +08005368inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
5369 /*
5370 * PASID based IOTLB invalidation: PASID selective (per PASID),
5371 * page selective (address granularity)
5372 */
5373 {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
5374 /* PASID based dev TLBs */
5375 {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
5376 /* PASID cache */
5377 {-EINVAL, -EINVAL, -EINVAL}
5378};
5379
5380static inline int to_vtd_granularity(int type, int granu)
5381{
5382 return inv_type_granu_table[type][granu];
5383}
5384
5385static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
5386{
5387 u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
5388
5389 /* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc.
5390 * IOMMU cache invalidate API passes granu_size in bytes, and number of
5391 * granu size in contiguous memory.
5392 */
5393 return order_base_2(nr_pages);
5394}
5395
5396#ifdef CONFIG_INTEL_IOMMU_SVM
5397static int
5398intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
5399 struct iommu_cache_invalidate_info *inv_info)
5400{
5401 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5402 struct device_domain_info *info;
5403 struct intel_iommu *iommu;
5404 unsigned long flags;
5405 int cache_type;
5406 u8 bus, devfn;
5407 u16 did, sid;
5408 int ret = 0;
5409 u64 size = 0;
5410
5411 if (!inv_info || !dmar_domain ||
5412 inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
5413 return -EINVAL;
5414
5415 if (!dev || !dev_is_pci(dev))
5416 return -ENODEV;
5417
5418 iommu = device_to_iommu(dev, &bus, &devfn);
5419 if (!iommu)
5420 return -ENODEV;
5421
5422 if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
5423 return -EINVAL;
5424
5425 spin_lock_irqsave(&device_domain_lock, flags);
5426 spin_lock(&iommu->lock);
Lu Baolue85bb992020-05-16 14:20:52 +08005427 info = get_domain_info(dev);
Jacob Pan6ee1b772020-05-16 14:20:49 +08005428 if (!info) {
5429 ret = -EINVAL;
5430 goto out_unlock;
5431 }
5432 did = dmar_domain->iommu_did[iommu->seq_id];
5433 sid = PCI_DEVID(bus, devfn);
5434
5435 /* Size is only valid in address selective invalidation */
Liu Yi L0fa1a152020-07-24 09:49:18 +08005436 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
Jacob Pan6ee1b772020-05-16 14:20:49 +08005437 size = to_vtd_size(inv_info->addr_info.granule_size,
5438 inv_info->addr_info.nb_granules);
5439
5440 for_each_set_bit(cache_type,
5441 (unsigned long *)&inv_info->cache,
5442 IOMMU_CACHE_INV_TYPE_NR) {
5443 int granu = 0;
5444 u64 pasid = 0;
Liu Yi L0fa1a152020-07-24 09:49:18 +08005445 u64 addr = 0;
Jacob Pan6ee1b772020-05-16 14:20:49 +08005446
5447 granu = to_vtd_granularity(cache_type, inv_info->granularity);
5448 if (granu == -EINVAL) {
5449 pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
5450 cache_type, inv_info->granularity);
5451 break;
5452 }
5453
5454 /*
5455 * PASID is stored in different locations based on the
5456 * granularity.
5457 */
5458 if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
5459 (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
5460 pasid = inv_info->pasid_info.pasid;
5461 else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
5462 (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
5463 pasid = inv_info->addr_info.pasid;
5464
5465 switch (BIT(cache_type)) {
5466 case IOMMU_CACHE_INV_TYPE_IOTLB:
Jacob Pan1ff00272020-07-24 09:49:19 +08005467 /* HW will ignore LSB bits based on address mask */
Jacob Pan6ee1b772020-05-16 14:20:49 +08005468 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
5469 size &&
5470 (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
Jacob Pan1ff00272020-07-24 09:49:19 +08005471 pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
Jacob Pan6ee1b772020-05-16 14:20:49 +08005472 inv_info->addr_info.addr, size);
Jacob Pan6ee1b772020-05-16 14:20:49 +08005473 }
5474
5475 /*
5476 * If granu is PASID-selective, address is ignored.
5477 * We use npages = -1 to indicate that.
5478 */
5479 qi_flush_piotlb(iommu, did, pasid,
5480 mm_to_dma_pfn(inv_info->addr_info.addr),
5481 (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
5482 inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
5483
Liu Yi L0fa1a152020-07-24 09:49:18 +08005484 if (!info->ats_enabled)
5485 break;
Jacob Pan6ee1b772020-05-16 14:20:49 +08005486 /*
5487 * Always flush device IOTLB if ATS is enabled. vIOMMU
5488 * in the guest may assume IOTLB flush is inclusive,
5489 * which is more efficient.
5490 */
Liu Yi L0fa1a152020-07-24 09:49:18 +08005491 fallthrough;
Jacob Pan6ee1b772020-05-16 14:20:49 +08005492 case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
Liu Yi L0fa1a152020-07-24 09:49:18 +08005493 /*
5494 * PASID based device TLB invalidation does not support
5495 * IOMMU_INV_GRANU_PASID granularity but only supports
5496 * IOMMU_INV_GRANU_ADDR.
5497 * The equivalent of that is we set the size to be the
5498 * entire range of 64 bit. User only provides PASID info
5499 * without address info. So we set addr to 0.
5500 */
5501 if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
5502 size = 64 - VTD_PAGE_SHIFT;
5503 addr = 0;
5504 } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
5505 addr = inv_info->addr_info.addr;
5506 }
5507
Jacob Pan6ee1b772020-05-16 14:20:49 +08005508 if (info->ats_enabled)
5509 qi_flush_dev_iotlb_pasid(iommu, sid,
5510 info->pfsid, pasid,
Liu Yi L0fa1a152020-07-24 09:49:18 +08005511 info->ats_qdep, addr,
Jacob Pan78df6c82020-07-24 09:49:15 +08005512 size);
Jacob Pan6ee1b772020-05-16 14:20:49 +08005513 else
5514 pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
5515 break;
5516 default:
5517 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
5518 cache_type);
5519 ret = -EINVAL;
5520 }
5521 }
5522out_unlock:
5523 spin_unlock(&iommu->lock);
5524 spin_unlock_irqrestore(&device_domain_lock, flags);
5525
5526 return ret;
5527}
5528#endif
5529
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005530static int intel_iommu_map(struct iommu_domain *domain,
5531 unsigned long iova, phys_addr_t hpa,
Tom Murphy781ca2d2019-09-08 09:56:38 -07005532 size_t size, int iommu_prot, gfp_t gfp)
Kay, Allen M38717942008-09-09 18:37:29 +03005533{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005534 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005535 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005536 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005537 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005538
Joerg Roedeldde57a22008-12-03 15:04:09 +01005539 if (iommu_prot & IOMMU_READ)
5540 prot |= DMA_PTE_READ;
5541 if (iommu_prot & IOMMU_WRITE)
5542 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08005543 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5544 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005545
David Woodhouse163cc522009-06-28 00:51:17 +01005546 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005547 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005548 u64 end;
5549
5550 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01005551 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005552 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005553 pr_err("%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005554 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01005555 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005556 return -EFAULT;
5557 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01005558 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005559 }
David Woodhousead051222009-06-28 14:22:28 +01005560 /* Round up size to next multiple of PAGE_SIZE, if it and
5561 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01005562 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01005563 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5564 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005565 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03005566}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005567
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02005568static size_t intel_iommu_unmap(struct iommu_domain *domain,
Will Deacon56f8af52019-07-02 16:44:06 +01005569 unsigned long iova, size_t size,
5570 struct iommu_iotlb_gather *gather)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005571{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005572 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00005573 struct page *freelist = NULL;
David Woodhouseea8ea462014-03-05 17:09:32 +00005574 unsigned long start_pfn, last_pfn;
5575 unsigned int npages;
Joerg Roedel42e8c182015-07-21 15:50:02 +02005576 int iommu_id, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01005577
David Woodhouse5cf0a762014-03-19 16:07:49 +00005578 /* Cope with horrid API which requires us to unmap more than the
5579 size argument if it happens to be a large-page mapping. */
Joerg Roedeldc02e462015-08-13 11:15:13 +02005580 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
David Woodhouse5cf0a762014-03-19 16:07:49 +00005581
5582 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5583 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5584
David Woodhouseea8ea462014-03-05 17:09:32 +00005585 start_pfn = iova >> VTD_PAGE_SHIFT;
5586 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5587
5588 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5589
5590 npages = last_pfn - start_pfn + 1;
5591
Shaokun Zhangf746a022018-03-22 18:18:06 +08005592 for_each_domain_iommu(iommu_id, dmar_domain)
Joerg Roedel42e8c182015-07-21 15:50:02 +02005593 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5594 start_pfn, npages, !freelist, 0);
David Woodhouseea8ea462014-03-05 17:09:32 +00005595
5596 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005597
David Woodhouse163cc522009-06-28 00:51:17 +01005598 if (dmar_domain->max_addr == iova + size)
5599 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005600
David Woodhouse5cf0a762014-03-19 16:07:49 +00005601 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005602}
Kay, Allen M38717942008-09-09 18:37:29 +03005603
Joerg Roedeld14d6572008-12-03 15:06:57 +01005604static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05305605 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03005606{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005607 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03005608 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00005609 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005610 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03005611
David Woodhouse5cf0a762014-03-19 16:07:49 +00005612 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Yonghyun Hwang77a1bce2020-02-26 12:30:06 -08005613 if (pte && dma_pte_present(pte))
5614 phys = dma_pte_addr(pte) +
5615 (iova & (BIT_MASK(level_to_offset_bits(level) +
5616 VTD_PAGE_SHIFT) - 1));
Kay, Allen M38717942008-09-09 18:37:29 +03005617
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005618 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03005619}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005620
Lu Baolu95587a72019-03-25 09:30:30 +08005621static inline bool scalable_mode_support(void)
5622{
5623 struct dmar_drhd_unit *drhd;
5624 struct intel_iommu *iommu;
5625 bool ret = true;
5626
5627 rcu_read_lock();
5628 for_each_active_iommu(iommu, drhd) {
5629 if (!sm_supported(iommu)) {
5630 ret = false;
5631 break;
5632 }
5633 }
5634 rcu_read_unlock();
5635
5636 return ret;
5637}
5638
5639static inline bool iommu_pasid_support(void)
5640{
5641 struct dmar_drhd_unit *drhd;
5642 struct intel_iommu *iommu;
5643 bool ret = true;
5644
5645 rcu_read_lock();
5646 for_each_active_iommu(iommu, drhd) {
5647 if (!pasid_supported(iommu)) {
5648 ret = false;
5649 break;
5650 }
5651 }
5652 rcu_read_unlock();
5653
5654 return ret;
5655}
5656
Lu Baolu2cd13112020-01-02 08:18:15 +08005657static inline bool nested_mode_support(void)
5658{
5659 struct dmar_drhd_unit *drhd;
5660 struct intel_iommu *iommu;
5661 bool ret = true;
5662
5663 rcu_read_lock();
5664 for_each_active_iommu(iommu, drhd) {
5665 if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
5666 ret = false;
5667 break;
5668 }
5669 }
5670 rcu_read_unlock();
5671
5672 return ret;
5673}
5674
Joerg Roedel5d587b82014-09-05 10:50:45 +02005675static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005676{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005677 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005678 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04005679 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005680 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005681
Joerg Roedel5d587b82014-09-05 10:50:45 +02005682 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005683}
5684
Joerg Roedele5d18412020-04-29 15:36:54 +02005685static struct iommu_device *intel_iommu_probe_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04005686{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005687 struct intel_iommu *iommu;
Alex Williamson70ae6f02011-10-21 15:56:11 -04005688
Lu Baoludd6692f2020-07-24 09:49:21 +08005689 iommu = device_to_iommu(dev, NULL, NULL);
Alex Williamsona5459cf2014-06-12 16:12:31 -06005690 if (!iommu)
Joerg Roedele5d18412020-04-29 15:36:54 +02005691 return ERR_PTR(-ENODEV);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005692
Lu Baolu8af46c72019-05-25 13:41:32 +08005693 if (translation_pre_enabled(iommu))
Joerg Roedel01b9d4e2020-06-25 15:08:25 +02005694 dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
Lu Baolu8af46c72019-05-25 13:41:32 +08005695
Joerg Roedele5d18412020-04-29 15:36:54 +02005696 return &iommu->iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005697}
5698
Joerg Roedele5d18412020-04-29 15:36:54 +02005699static void intel_iommu_release_device(struct device *dev)
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005700{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005701 struct intel_iommu *iommu;
Alex Williamsona5459cf2014-06-12 16:12:31 -06005702
Lu Baoludd6692f2020-07-24 09:49:21 +08005703 iommu = device_to_iommu(dev, NULL, NULL);
Alex Williamsona5459cf2014-06-12 16:12:31 -06005704 if (!iommu)
5705 return;
5706
Lu Baolu458b7c82019-08-01 11:14:58 +08005707 dmar_remove_one_dev_info(dev);
5708
Lu Baolu6fc70202020-05-06 09:59:47 +08005709 set_dma_ops(dev, NULL);
5710}
Alex Williamsona5459cf2014-06-12 16:12:31 -06005711
Lu Baolu6fc70202020-05-06 09:59:47 +08005712static void intel_iommu_probe_finalize(struct device *dev)
5713{
5714 struct iommu_domain *domain;
Lu Baolucfb94a32019-09-06 14:14:52 +08005715
Lu Baolu6fc70202020-05-06 09:59:47 +08005716 domain = iommu_get_domain_for_dev(dev);
Lu Baolucfb94a32019-09-06 14:14:52 +08005717 if (device_needs_bounce(dev))
Lu Baolu6fc70202020-05-06 09:59:47 +08005718 set_dma_ops(dev, &bounce_dma_ops);
5719 else if (domain && domain->type == IOMMU_DOMAIN_DMA)
5720 set_dma_ops(dev, &intel_dma_ops);
5721 else
Lu Baolucfb94a32019-09-06 14:14:52 +08005722 set_dma_ops(dev, NULL);
Alex Williamson70ae6f02011-10-21 15:56:11 -04005723}
5724
Eric Auger0659b8d2017-01-19 20:57:53 +00005725static void intel_iommu_get_resv_regions(struct device *device,
5726 struct list_head *head)
5727{
Eric Auger5f64ce52019-06-03 08:53:31 +02005728 int prot = DMA_PTE_READ | DMA_PTE_WRITE;
Eric Auger0659b8d2017-01-19 20:57:53 +00005729 struct iommu_resv_region *reg;
5730 struct dmar_rmrr_unit *rmrr;
5731 struct device *i_dev;
5732 int i;
5733
Eric Auger5f64ce52019-06-03 08:53:31 +02005734 down_read(&dmar_global_lock);
Eric Auger0659b8d2017-01-19 20:57:53 +00005735 for_each_rmrr_units(rmrr) {
5736 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5737 i, i_dev) {
Eric Auger5f64ce52019-06-03 08:53:31 +02005738 struct iommu_resv_region *resv;
Eric Auger1c5c59f2019-06-03 08:53:36 +02005739 enum iommu_resv_type type;
Eric Auger5f64ce52019-06-03 08:53:31 +02005740 size_t length;
5741
Eric Auger3855ba22019-06-03 08:53:34 +02005742 if (i_dev != device &&
5743 !is_downstream_to_pci_bridge(device, i_dev))
Eric Auger0659b8d2017-01-19 20:57:53 +00005744 continue;
5745
Eric Auger5f64ce52019-06-03 08:53:31 +02005746 length = rmrr->end_address - rmrr->base_address + 1;
Eric Auger1c5c59f2019-06-03 08:53:36 +02005747
5748 type = device_rmrr_is_relaxable(device) ?
5749 IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
5750
Eric Auger5f64ce52019-06-03 08:53:31 +02005751 resv = iommu_alloc_resv_region(rmrr->base_address,
Eric Auger1c5c59f2019-06-03 08:53:36 +02005752 length, prot, type);
Eric Auger5f64ce52019-06-03 08:53:31 +02005753 if (!resv)
5754 break;
5755
5756 list_add_tail(&resv->list, head);
Eric Auger0659b8d2017-01-19 20:57:53 +00005757 }
5758 }
Eric Auger5f64ce52019-06-03 08:53:31 +02005759 up_read(&dmar_global_lock);
Eric Auger0659b8d2017-01-19 20:57:53 +00005760
Lu Baolud850c2e2019-05-25 13:41:24 +08005761#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
5762 if (dev_is_pci(device)) {
5763 struct pci_dev *pdev = to_pci_dev(device);
5764
5765 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
Jerry Snitselaarcde93192019-12-12 22:36:42 -07005766 reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
Alex Williamsond8018a02019-12-11 13:28:29 -07005767 IOMMU_RESV_DIRECT_RELAXABLE);
Lu Baolud850c2e2019-05-25 13:41:24 +08005768 if (reg)
5769 list_add_tail(&reg->list, head);
5770 }
5771 }
5772#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
5773
Eric Auger0659b8d2017-01-19 20:57:53 +00005774 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5775 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00005776 0, IOMMU_RESV_MSI);
Eric Auger0659b8d2017-01-19 20:57:53 +00005777 if (!reg)
5778 return;
5779 list_add_tail(&reg->list, head);
5780}
5781
Lu Baolud7cbc0f2019-03-25 09:30:29 +08005782int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005783{
5784 struct device_domain_info *info;
5785 struct context_entry *context;
5786 struct dmar_domain *domain;
5787 unsigned long flags;
5788 u64 ctx_lo;
5789 int ret;
5790
Lu Baolu4ec066c2019-05-25 13:41:33 +08005791 domain = find_domain(dev);
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005792 if (!domain)
5793 return -EINVAL;
5794
5795 spin_lock_irqsave(&device_domain_lock, flags);
5796 spin_lock(&iommu->lock);
5797
5798 ret = -EINVAL;
Lu Baolue85bb992020-05-16 14:20:52 +08005799 info = get_domain_info(dev);
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005800 if (!info || !info->pasid_supported)
5801 goto out;
5802
5803 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5804 if (WARN_ON(!context))
5805 goto out;
5806
5807 ctx_lo = context[0].lo;
5808
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005809 if (!(ctx_lo & CONTEXT_PASIDE)) {
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005810 ctx_lo |= CONTEXT_PASIDE;
5811 context[0].lo = ctx_lo;
5812 wmb();
Lu Baolud7cbc0f2019-03-25 09:30:29 +08005813 iommu->flush.flush_context(iommu,
5814 domain->iommu_did[iommu->seq_id],
5815 PCI_DEVID(info->bus, info->devfn),
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005816 DMA_CCMD_MASK_NOBIT,
5817 DMA_CCMD_DEVICE_INVL);
5818 }
5819
5820 /* Enable PASID support in the device, if it wasn't already */
5821 if (!info->pasid_enabled)
5822 iommu_enable_dev_iotlb(info);
5823
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005824 ret = 0;
5825
5826 out:
5827 spin_unlock(&iommu->lock);
5828 spin_unlock_irqrestore(&device_domain_lock, flags);
5829
5830 return ret;
5831}
5832
James Sewart73bcbdc2019-05-25 13:41:23 +08005833static void intel_iommu_apply_resv_region(struct device *dev,
5834 struct iommu_domain *domain,
5835 struct iommu_resv_region *region)
5836{
5837 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5838 unsigned long start, end;
5839
5840 start = IOVA_PFN(region->start);
5841 end = IOVA_PFN(region->start + region->length - 1);
5842
5843 WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
5844}
5845
Patrick Steinhardt4a350a02019-12-27 00:56:18 +01005846static struct iommu_group *intel_iommu_device_group(struct device *dev)
5847{
5848 if (dev_is_pci(dev))
5849 return pci_device_group(dev);
5850 return generic_device_group(dev);
5851}
5852
Lu Baolu95587a72019-03-25 09:30:30 +08005853static int intel_iommu_enable_auxd(struct device *dev)
5854{
5855 struct device_domain_info *info;
5856 struct intel_iommu *iommu;
5857 unsigned long flags;
Lu Baolu95587a72019-03-25 09:30:30 +08005858 int ret;
5859
Lu Baoludd6692f2020-07-24 09:49:21 +08005860 iommu = device_to_iommu(dev, NULL, NULL);
Lu Baolu95587a72019-03-25 09:30:30 +08005861 if (!iommu || dmar_disabled)
5862 return -EINVAL;
5863
5864 if (!sm_supported(iommu) || !pasid_supported(iommu))
5865 return -EINVAL;
5866
5867 ret = intel_iommu_enable_pasid(iommu, dev);
5868 if (ret)
5869 return -ENODEV;
5870
5871 spin_lock_irqsave(&device_domain_lock, flags);
Lu Baolue85bb992020-05-16 14:20:52 +08005872 info = get_domain_info(dev);
Lu Baolu95587a72019-03-25 09:30:30 +08005873 info->auxd_enabled = 1;
5874 spin_unlock_irqrestore(&device_domain_lock, flags);
5875
5876 return 0;
5877}
5878
5879static int intel_iommu_disable_auxd(struct device *dev)
5880{
5881 struct device_domain_info *info;
5882 unsigned long flags;
5883
5884 spin_lock_irqsave(&device_domain_lock, flags);
Lu Baolue85bb992020-05-16 14:20:52 +08005885 info = get_domain_info(dev);
Lu Baolu95587a72019-03-25 09:30:30 +08005886 if (!WARN_ON(!info))
5887 info->auxd_enabled = 0;
5888 spin_unlock_irqrestore(&device_domain_lock, flags);
5889
5890 return 0;
5891}
5892
5893/*
5894 * A PCI express designated vendor specific extended capability is defined
5895 * in the section 3.7 of Intel scalable I/O virtualization technical spec
5896 * for system software and tools to detect endpoint devices supporting the
5897 * Intel scalable IO virtualization without host driver dependency.
5898 *
5899 * Returns the address of the matching extended capability structure within
5900 * the device's PCI configuration space or 0 if the device does not support
5901 * it.
5902 */
5903static int siov_find_pci_dvsec(struct pci_dev *pdev)
5904{
5905 int pos;
5906 u16 vendor, id;
5907
5908 pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5909 while (pos) {
5910 pci_read_config_word(pdev, pos + 4, &vendor);
5911 pci_read_config_word(pdev, pos + 8, &id);
5912 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5913 return pos;
5914
5915 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
5916 }
5917
5918 return 0;
5919}
5920
5921static bool
5922intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5923{
5924 if (feat == IOMMU_DEV_FEAT_AUX) {
5925 int ret;
5926
5927 if (!dev_is_pci(dev) || dmar_disabled ||
5928 !scalable_mode_support() || !iommu_pasid_support())
5929 return false;
5930
5931 ret = pci_pasid_features(to_pci_dev(dev));
5932 if (ret < 0)
5933 return false;
5934
5935 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5936 }
5937
Jacob Pan76fdd6c2020-05-16 14:20:53 +08005938 if (feat == IOMMU_DEV_FEAT_SVA) {
5939 struct device_domain_info *info = get_domain_info(dev);
5940
5941 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
5942 info->pasid_supported && info->pri_supported &&
5943 info->ats_supported;
5944 }
5945
Lu Baolu95587a72019-03-25 09:30:30 +08005946 return false;
5947}
5948
5949static int
5950intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5951{
5952 if (feat == IOMMU_DEV_FEAT_AUX)
5953 return intel_iommu_enable_auxd(dev);
5954
Jacob Pan76fdd6c2020-05-16 14:20:53 +08005955 if (feat == IOMMU_DEV_FEAT_SVA) {
5956 struct device_domain_info *info = get_domain_info(dev);
5957
5958 if (!info)
5959 return -EINVAL;
5960
5961 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
5962 return 0;
5963 }
5964
Lu Baolu95587a72019-03-25 09:30:30 +08005965 return -ENODEV;
5966}
5967
5968static int
5969intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5970{
5971 if (feat == IOMMU_DEV_FEAT_AUX)
5972 return intel_iommu_disable_auxd(dev);
5973
5974 return -ENODEV;
5975}
5976
5977static bool
5978intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5979{
Lu Baolue85bb992020-05-16 14:20:52 +08005980 struct device_domain_info *info = get_domain_info(dev);
Lu Baolu95587a72019-03-25 09:30:30 +08005981
5982 if (feat == IOMMU_DEV_FEAT_AUX)
5983 return scalable_mode_support() && info && info->auxd_enabled;
5984
5985 return false;
5986}
5987
Lu Baolu0e8000f2019-03-25 09:30:33 +08005988static int
5989intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5990{
5991 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5992
5993 return dmar_domain->default_pasid > 0 ?
5994 dmar_domain->default_pasid : -EINVAL;
5995}
5996
Lu Baolu8af46c72019-05-25 13:41:32 +08005997static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
5998 struct device *dev)
5999{
Joerg Roedel1d46159782020-02-17 17:12:37 +01006000 return attach_deferred(dev);
Lu Baolu8af46c72019-05-25 13:41:32 +08006001}
6002
Lu Baolu2cd13112020-01-02 08:18:15 +08006003static int
6004intel_iommu_domain_set_attr(struct iommu_domain *domain,
6005 enum iommu_attr attr, void *data)
6006{
6007 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
6008 unsigned long flags;
6009 int ret = 0;
6010
6011 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
6012 return -EINVAL;
6013
6014 switch (attr) {
6015 case DOMAIN_ATTR_NESTING:
6016 spin_lock_irqsave(&device_domain_lock, flags);
6017 if (nested_mode_support() &&
6018 list_empty(&dmar_domain->devices)) {
6019 dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
6020 dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
6021 } else {
6022 ret = -ENODEV;
6023 }
6024 spin_unlock_irqrestore(&device_domain_lock, flags);
6025 break;
6026 default:
6027 ret = -EINVAL;
6028 break;
6029 }
6030
6031 return ret;
6032}
6033
Rajat Jain67e8a5b2020-06-23 07:13:42 +08006034/*
6035 * Check that the device does not live on an external facing PCI port that is
6036 * marked as untrusted. Such devices should not be able to apply quirks and
6037 * thus not be able to bypass the IOMMU restrictions.
6038 */
6039static bool risky_device(struct pci_dev *pdev)
6040{
6041 if (pdev->untrusted) {
6042 pci_info(pdev,
6043 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
6044 pdev->vendor, pdev->device);
6045 pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
6046 return true;
6047 }
6048 return false;
6049}
6050
Joerg Roedelb0119e82017-02-01 13:23:08 +01006051const struct iommu_ops intel_iommu_ops = {
Eric Auger0659b8d2017-01-19 20:57:53 +00006052 .capable = intel_iommu_capable,
6053 .domain_alloc = intel_iommu_domain_alloc,
6054 .domain_free = intel_iommu_domain_free,
Lu Baolu2cd13112020-01-02 08:18:15 +08006055 .domain_set_attr = intel_iommu_domain_set_attr,
Eric Auger0659b8d2017-01-19 20:57:53 +00006056 .attach_dev = intel_iommu_attach_device,
6057 .detach_dev = intel_iommu_detach_device,
Lu Baolu67b8e022019-03-25 09:30:32 +08006058 .aux_attach_dev = intel_iommu_aux_attach_device,
6059 .aux_detach_dev = intel_iommu_aux_detach_device,
Lu Baolu0e8000f2019-03-25 09:30:33 +08006060 .aux_get_pasid = intel_iommu_aux_get_pasid,
Eric Auger0659b8d2017-01-19 20:57:53 +00006061 .map = intel_iommu_map,
6062 .unmap = intel_iommu_unmap,
Eric Auger0659b8d2017-01-19 20:57:53 +00006063 .iova_to_phys = intel_iommu_iova_to_phys,
Joerg Roedele5d18412020-04-29 15:36:54 +02006064 .probe_device = intel_iommu_probe_device,
Lu Baolu6fc70202020-05-06 09:59:47 +08006065 .probe_finalize = intel_iommu_probe_finalize,
Joerg Roedele5d18412020-04-29 15:36:54 +02006066 .release_device = intel_iommu_release_device,
Eric Auger0659b8d2017-01-19 20:57:53 +00006067 .get_resv_regions = intel_iommu_get_resv_regions,
Thierry Reding0ecdebb2019-12-18 14:42:04 +01006068 .put_resv_regions = generic_iommu_put_resv_regions,
James Sewart73bcbdc2019-05-25 13:41:23 +08006069 .apply_resv_region = intel_iommu_apply_resv_region,
Patrick Steinhardt4a350a02019-12-27 00:56:18 +01006070 .device_group = intel_iommu_device_group,
Lu Baolu95587a72019-03-25 09:30:30 +08006071 .dev_has_feat = intel_iommu_dev_has_feat,
6072 .dev_feat_enabled = intel_iommu_dev_feat_enabled,
6073 .dev_enable_feat = intel_iommu_dev_enable_feat,
6074 .dev_disable_feat = intel_iommu_dev_disable_feat,
Lu Baolu8af46c72019-05-25 13:41:32 +08006075 .is_attach_deferred = intel_iommu_is_attach_deferred,
Joerg Roedel7039d112020-04-29 15:36:42 +02006076 .def_domain_type = device_def_domain_type,
Eric Auger0659b8d2017-01-19 20:57:53 +00006077 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Jacob Pan56722a42020-05-16 14:20:47 +08006078#ifdef CONFIG_INTEL_IOMMU_SVM
Jacob Pan6ee1b772020-05-16 14:20:49 +08006079 .cache_invalidate = intel_iommu_sva_invalidate,
Jacob Pan56722a42020-05-16 14:20:47 +08006080 .sva_bind_gpasid = intel_svm_bind_gpasid,
6081 .sva_unbind_gpasid = intel_svm_unbind_gpasid,
Jacob Pan064a57d2020-05-16 14:20:54 +08006082 .sva_bind = intel_svm_bind,
6083 .sva_unbind = intel_svm_unbind,
6084 .sva_get_pasid = intel_svm_get_pasid,
Lu Baolu8b737122020-07-24 09:49:24 +08006085 .page_response = intel_svm_page_response,
Jacob Pan56722a42020-05-16 14:20:47 +08006086#endif
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01006087};
David Woodhouse9af88142009-02-13 23:18:03 +00006088
Chris Wilson1f762492019-09-09 12:00:10 +01006089static void quirk_iommu_igfx(struct pci_dev *dev)
Daniel Vetter94526182013-01-20 23:50:13 +01006090{
Rajat Jain67e8a5b2020-06-23 07:13:42 +08006091 if (risky_device(dev))
6092 return;
6093
Bjorn Helgaas932a6522019-02-08 16:06:00 -06006094 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01006095 dmar_map_gfx = 0;
6096}
6097
Chris Wilson1f762492019-09-09 12:00:10 +01006098/* G4x/GM45 integrated gfx dmar support is totally busted. */
6099DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
6100DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
6101DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
6102DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
6103DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
6104DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
6105DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
6106
6107/* Broadwell igfx malfunctions with dmar */
6108DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
6109DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
6110DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
6111DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
6112DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
6113DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
6114DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
6115DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
6116DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
6117DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
6118DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
6119DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
6120DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
6121DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
6122DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
6123DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
6124DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
6125DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
6126DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
6127DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
6128DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
6129DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
6130DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
6131DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
Daniel Vetter94526182013-01-20 23:50:13 +01006132
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08006133static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00006134{
Rajat Jain67e8a5b2020-06-23 07:13:42 +08006135 if (risky_device(dev))
6136 return;
6137
David Woodhouse9af88142009-02-13 23:18:03 +00006138 /*
6139 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01006140 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00006141 */
Bjorn Helgaas932a6522019-02-08 16:06:00 -06006142 pci_info(dev, "Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00006143 rwbf_quirk = 1;
6144}
6145
6146DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01006147DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
6148DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
6149DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
6150DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
6151DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
6152DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07006153
Adam Jacksoneecfd572010-08-25 21:17:34 +01006154#define GGC 0x52
6155#define GGC_MEMORY_SIZE_MASK (0xf << 8)
6156#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
6157#define GGC_MEMORY_SIZE_1M (0x1 << 8)
6158#define GGC_MEMORY_SIZE_2M (0x3 << 8)
6159#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
6160#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
6161#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
6162#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
6163
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08006164static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01006165{
6166 unsigned short ggc;
6167
Rajat Jain67e8a5b2020-06-23 07:13:42 +08006168 if (risky_device(dev))
6169 return;
6170
Adam Jacksoneecfd572010-08-25 21:17:34 +01006171 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01006172 return;
6173
Adam Jacksoneecfd572010-08-25 21:17:34 +01006174 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
Bjorn Helgaas932a6522019-02-08 16:06:00 -06006175 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
David Woodhouse9eecabc2010-09-21 22:28:23 +01006176 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07006177 } else if (dmar_map_gfx) {
6178 /* we have to ensure the gfx device is idle before we flush */
Bjorn Helgaas932a6522019-02-08 16:06:00 -06006179 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
David Woodhouse6fbcfb32011-09-25 19:11:14 -07006180 intel_iommu_strict = 1;
6181 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01006182}
6183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
6184DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
6185DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
6186DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
6187
Lu Baolub1012ca2020-07-23 09:34:37 +08006188static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
6189{
6190 unsigned short ver;
6191
6192 if (!IS_GFX_DEVICE(dev))
6193 return;
6194
6195 ver = (dev->device >> 8) & 0xff;
6196 if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
6197 ver != 0x4e && ver != 0x8a && ver != 0x98 &&
6198 ver != 0x9a)
6199 return;
6200
6201 if (risky_device(dev))
6202 return;
6203
6204 pci_info(dev, "Skip IOMMU disabling for graphics\n");
6205 iommu_skip_te_disable = 1;
6206}
6207DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
6208
David Woodhousee0fc7e02009-09-30 09:12:17 -07006209/* On Tylersburg chipsets, some BIOSes have been known to enable the
6210 ISOCH DMAR unit for the Azalia sound device, but not give it any
6211 TLB entries, which causes it to deadlock. Check for that. We do
6212 this in a function called from init_dmars(), instead of in a PCI
6213 quirk, because we don't want to print the obnoxious "BIOS broken"
6214 message if VT-d is actually disabled.
6215*/
6216static void __init check_tylersburg_isoch(void)
6217{
6218 struct pci_dev *pdev;
6219 uint32_t vtisochctrl;
6220
6221 /* If there's no Azalia in the system anyway, forget it. */
6222 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
6223 if (!pdev)
6224 return;
Rajat Jain67e8a5b2020-06-23 07:13:42 +08006225
6226 if (risky_device(pdev)) {
6227 pci_dev_put(pdev);
6228 return;
6229 }
6230
David Woodhousee0fc7e02009-09-30 09:12:17 -07006231 pci_dev_put(pdev);
6232
6233 /* System Management Registers. Might be hidden, in which case
6234 we can't do the sanity check. But that's OK, because the
6235 known-broken BIOSes _don't_ actually hide it, so far. */
6236 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
6237 if (!pdev)
6238 return;
6239
Rajat Jain67e8a5b2020-06-23 07:13:42 +08006240 if (risky_device(pdev)) {
6241 pci_dev_put(pdev);
6242 return;
6243 }
6244
David Woodhousee0fc7e02009-09-30 09:12:17 -07006245 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
6246 pci_dev_put(pdev);
6247 return;
6248 }
6249
6250 pci_dev_put(pdev);
6251
6252 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
6253 if (vtisochctrl & 1)
6254 return;
6255
6256 /* Drop all bits other than the number of TLB entries */
6257 vtisochctrl &= 0x1c;
6258
6259 /* If we have the recommended number of TLB entries (16), fine. */
6260 if (vtisochctrl == 0x10)
6261 return;
6262
6263 /* Zero TLB entries? You get to ride the short bus to school. */
6264 if (!vtisochctrl) {
6265 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
6266 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
6267 dmi_get_system_info(DMI_BIOS_VENDOR),
6268 dmi_get_system_info(DMI_BIOS_VERSION),
6269 dmi_get_system_info(DMI_PRODUCT_VERSION));
6270 iommu_identity_mapping |= IDENTMAP_AZALIA;
6271 return;
6272 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02006273
6274 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
David Woodhousee0fc7e02009-09-30 09:12:17 -07006275 vtisochctrl);
6276}