/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "intel-pasid.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
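
/*
 * For example, with a 48-bit guest address width __DOMAIN_MAX_PFN(48) is
 * (1ULL << 36) - 1 = 0xfffffffff, so DOMAIN_MAX_ADDR(48) is 256TiB - 4KiB;
 * on 32-bit builds the min_t() above clamps the PFN to ULONG_MAX.
 */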

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is a power-of-two multiple of 4KiB and that
 * the mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
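
/*
 * ~0xFFFUL has every bit from bit 12 upwards set, i.e. we advertise every
 * power-of-two size >= 4KiB; the mapping code then backs such requests with
 * 4KiB entries plus whatever superpage sizes (2MiB/1GiB) cap_super_page_val()
 * reports for the IOMMUs involved.
 */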

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
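
/*
 * AGAW ("adjusted guest address width") encodes the page-table depth:
 * agaw 1 is a 39-bit/3-level table, agaw 2 is 48-bit/4-level and agaw 3 is
 * 57-bit/5-level (agaw_to_level() is simply agaw + 2). For example,
 * width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2.
 */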

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
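
/*
 * Each level resolves LEVEL_STRIDE (9) bits of the PFN, so a level-2 entry
 * spans level_size(2) = 512 pages (2MiB) and a level-3 entry spans 262144
 * pages (1GiB); align_to_level(pfn, 2) therefore rounds pfn up to the next
 * 2MiB boundary.
 */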

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
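
/*
 * With 4KiB kernel pages PAGE_SHIFT == VTD_PAGE_SHIFT == 12 and the two PFN
 * spaces coincide; only with a larger PAGE_SIZE does one MM page span
 * several VT-d (DMA) pages, e.g. sixteen of them for 64KiB kernel pages.
 */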

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
		__context_present(context) :
		__context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
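
/*
 * The helpers above are used together to build a context entry: a typical
 * sequence (see domain_context_mapping_one() later in this file) sets the
 * domain id and address width, points the address root at the domain's
 * page directory, picks the translation type, enables fault processing and
 * finally marks the entry present.
 */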

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
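
/*
 * A page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte) = 512
 * entries, so first_pte_in_page() is true exactly when a PTE pointer sits
 * on a 4KiB boundary; the clearing/freeing loops below use it to notice
 * when they have walked off the end of one table page.
 */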

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap))
#define pasid_enabled(iommu)	(ecs_enabled(iommu) && ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

/*
 * Iterate over elements in device_domain_list and call the specified
 * callback @fn against each element. This helper should only be used
 * in a context where the device_domain_lock is already held.
 */
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data)
{
	int ret = 0;
	struct device_domain_info *info;

	assert_spin_locked(&device_domain_lock);
	list_for_each_entry(info, &device_domain_list, global) {
		ret = fn(info, data);
		if (ret)
			return ret;
	}

	return 0;
}
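
/*
 * A minimal usage sketch, with a purely illustrative callback
 * (returning non-zero from the callback stops the iteration):
 *
 *	static int count_infos(struct device_domain_info *info, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	spin_lock_irqsave(&device_domain_lock, flags);
 *	for_each_device_domain(count_infos, &count);
 *	spin_unlock_irqrestore(&device_domain_lock, flags);
 */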

const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			printk(KERN_INFO
				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
			intel_iommu_tboot_noforce = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}
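
/*
 * iommu->domains is thus a two-level, lazily populated array keyed by
 * domain id: the high byte of the did selects one of 256 pointer pages and
 * the low byte indexes within it, so e.g. did 0x1234 lives at
 * iommu->domains[0x12][0x34].
 */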

void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
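
/*
 * cap_sagaw() is a bitmask of the AGAW values the IOMMU implements; e.g. a
 * value of 0x4 (only bit 2 set) means just 48-bit/4-level tables are
 * supported, so __iommu_calculate_agaw(iommu, 57) would start at agaw 3 and
 * walk down to return 2.
 */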

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns the single iommu in a domain */
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
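
/*
 * With extended context support (ecs_enabled()) each context entry is 256
 * bits instead of 128, so one 4KiB table only covers 128 functions:
 * root->lo points at the table for devfn 0x00-0x7f, root->hi at the one
 * for devfn 0x80-0xff, and the index is doubled. E.g. bus 0, devfn 0x81
 * resolves through root->hi at index (0x81 - 0x80) * 2 = 2.
 */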

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = to_pci_dev(dev);

#ifdef CONFIG_X86
		/* VMD child devices currently cannot be handled individually */
		if (is_vmd(pdev->bus))
			return NULL;
#endif

		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev && pdev->is_virtfn)
					goto got_pdev;

				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
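
/*
 * Example walk: for a domain with agaw 2 (48-bit, 4-level table) and
 * *target_level == 1, pfn bits [35:27] index level 4, [26:18] level 3,
 * [17:9] level 2 and [8:0] the leaf level, with any missing intermediate
 * table pages allocated along the way.
 */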
981
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100982
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700983/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100984static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
985 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100986 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700987{
988 struct dma_pte *parent, *pte = NULL;
989 int total = agaw_to_level(domain->agaw);
990 int offset;
991
992 parent = domain->pgd;
993 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100994 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700995 pte = &parent[offset];
996 if (level == total)
997 return pte;
998
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100999 if (!dma_pte_present(pte)) {
1000 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001001 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001002 }
1003
Yijing Wange16922a2014-05-20 20:37:51 +08001004 if (dma_pte_superpage(pte)) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001005 *large_page = total;
1006 return pte;
1007 }
1008
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001009 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001010 total--;
1011 }
1012 return NULL;
1013}
1014
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001015/* clear last level pte, a tlb flush should be followed */
David Woodhouse5cf0a762014-03-19 16:07:49 +00001016static void dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf52009-06-27 22:09:11 +01001017 unsigned long start_pfn,
1018 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001019{
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001020 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +01001021 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001022
Jiang Liu162d1b12014-07-11 14:19:35 +08001023 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1024 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -07001025 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +01001026
David Woodhouse04b18e62009-06-27 19:15:01 +01001027 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -07001028 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001029 large_page = 1;
1030 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +01001031 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001032 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +01001033 continue;
1034 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001035 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +01001036 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001037 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +01001038 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +01001039 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1040
David Woodhouse310a5ab2009-06-28 18:52:20 +01001041 domain_flush_cache(domain, first_pte,
1042 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -07001043
1044 } while (start_pfn && start_pfn <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001045}
1046
Alex Williamson3269ee02013-06-15 10:27:19 -06001047static void dma_pte_free_level(struct dmar_domain *domain, int level,
David Dillowbc24c572017-06-28 19:42:23 -07001048 int retain_level, struct dma_pte *pte,
1049 unsigned long pfn, unsigned long start_pfn,
1050 unsigned long last_pfn)
Alex Williamson3269ee02013-06-15 10:27:19 -06001051{
1052 pfn = max(start_pfn, pfn);
1053 pte = &pte[pfn_level_offset(pfn, level)];
1054
1055 do {
1056 unsigned long level_pfn;
1057 struct dma_pte *level_pte;
1058
1059 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1060 goto next;
1061
David Dillowf7116e12017-01-30 19:11:11 -08001062 level_pfn = pfn & level_mask(level);
Alex Williamson3269ee02013-06-15 10:27:19 -06001063 level_pte = phys_to_virt(dma_pte_addr(pte));
1064
David Dillowbc24c572017-06-28 19:42:23 -07001065 if (level > 2) {
1066 dma_pte_free_level(domain, level - 1, retain_level,
1067 level_pte, level_pfn, start_pfn,
1068 last_pfn);
1069 }
Alex Williamson3269ee02013-06-15 10:27:19 -06001070
David Dillowbc24c572017-06-28 19:42:23 -07001071 /*
1072 * Free the page table if we're below the level we want to
1073 * retain and the range covers the entire table.
1074 */
1075 if (level < retain_level && !(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -08001076 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -06001077 dma_clear_pte(pte);
1078 domain_flush_cache(domain, pte, sizeof(*pte));
1079 free_pgtable_page(level_pte);
1080 }
1081next:
1082 pfn += level_size(level);
1083 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1084}
1085
David Dillowbc24c572017-06-28 19:42:23 -07001086/*
1087 * clear last level (leaf) ptes and free page table pages below the
1088 * level we wish to keep intact.
1089 */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001090static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +01001091 unsigned long start_pfn,
David Dillowbc24c572017-06-28 19:42:23 -07001092 unsigned long last_pfn,
1093 int retain_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001094{
Jiang Liu162d1b12014-07-11 14:19:35 +08001095 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1096 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -07001097 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001098
Jiang Liud41a4ad2014-07-11 14:19:34 +08001099 dma_pte_clear_range(domain, start_pfn, last_pfn);
1100
David Woodhousef3a0a522009-06-30 03:40:07 +01001101 /* We don't need lock here; nobody else touches the iova range */
David Dillowbc24c572017-06-28 19:42:23 -07001102 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
Alex Williamson3269ee02013-06-15 10:27:19 -06001103 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +01001104
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001105 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +01001106 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001107 free_pgtable_page(domain->pgd);
1108 domain->pgd = NULL;
1109 }
1110}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

static void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
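
/*
 * The usual pattern for callers of domain_unmap() elsewhere in this file
 * is: freelist = domain_unmap(domain, start_pfn, last_pfn); flush the
 * IOTLB for that range; then dma_free_pagelist(freelist) once the hardware
 * can no longer be walking those pages.
 */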
1229
Joerg Roedel13cf0172017-08-11 11:40:10 +02001230static void iova_entry_free(unsigned long data)
1231{
1232 struct page *freelist = (struct page *)data;
1233
1234 dma_free_pagelist(freelist);
1235}
1236
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001237/* iommu handling */
1238static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1239{
1240 struct root_entry *root;
1241 unsigned long flags;
1242
Suresh Siddha4c923d42009-10-02 11:01:24 -07001243 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001244 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001245 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001246 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001247 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001248 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001249
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001250 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001251
1252 spin_lock_irqsave(&iommu->lock, flags);
1253 iommu->root_entry = root;
1254 spin_unlock_irqrestore(&iommu->lock, flags);
1255
1256 return 0;
1257}
1258
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001259static void iommu_set_root_entry(struct intel_iommu *iommu)
1260{
David Woodhouse03ecc322015-02-13 14:35:21 +00001261 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001262 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001263 unsigned long flag;
1264
David Woodhouse03ecc322015-02-13 14:35:21 +00001265 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001266 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001267 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001268
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001269 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001270 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001271
David Woodhousec416daa2009-05-10 20:30:58 +01001272 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001273
1274 /* Make sure hardware complete it */
1275 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001276 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001277
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001278 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001279}
1280
1281static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1282{
1283 u32 val;
1284 unsigned long flag;
1285
David Woodhouse9af88142009-02-13 23:18:03 +00001286 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001287 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001288
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001289 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001290 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001291
1292 /* Make sure hardware complete it */
1293 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001294 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001295
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001296 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001297}
1298
 1299/* The return value determines whether we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001300static void __iommu_flush_context(struct intel_iommu *iommu,
1301 u16 did, u16 source_id, u8 function_mask,
1302 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001303{
1304 u64 val = 0;
1305 unsigned long flag;
1306
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001307 switch (type) {
1308 case DMA_CCMD_GLOBAL_INVL:
1309 val = DMA_CCMD_GLOBAL_INVL;
1310 break;
1311 case DMA_CCMD_DOMAIN_INVL:
1312 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1313 break;
1314 case DMA_CCMD_DEVICE_INVL:
1315 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1316 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1317 break;
1318 default:
1319 BUG();
1320 }
1321 val |= DMA_CCMD_ICC;
1322
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001323 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001324 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1325
 1326	/* Make sure hardware completes it */
1327 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1328 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1329
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001330 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001331}
1332
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001333/* The return value determines whether we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001334static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1335 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001336{
1337 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1338 u64 val = 0, val_iva = 0;
1339 unsigned long flag;
1340
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001341 switch (type) {
1342 case DMA_TLB_GLOBAL_FLUSH:
 1343		/* a global flush doesn't need to set IVA_REG */
1344 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1345 break;
1346 case DMA_TLB_DSI_FLUSH:
1347 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1348 break;
1349 case DMA_TLB_PSI_FLUSH:
1350 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001351 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001352 val_iva = size_order | addr;
1353 break;
1354 default:
1355 BUG();
1356 }
1357 /* Note: set drain read/write */
1358#if 0
1359 /*
 1360	 * This is probably only here to be extra safe. It looks like we
 1361	 * can ignore it without any impact.
1362 */
1363 if (cap_read_drain(iommu->cap))
1364 val |= DMA_TLB_READ_DRAIN;
1365#endif
1366 if (cap_write_drain(iommu->cap))
1367 val |= DMA_TLB_WRITE_DRAIN;
1368
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001369 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001370 /* Note: Only uses first TLB reg currently */
1371 if (val_iva)
1372 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1373 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1374
 1375	/* Make sure hardware completes it */
1376 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1377 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1378
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001379 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001380
1381 /* check IOTLB invalidation granularity */
1382 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001383 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001384 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001385 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001386 (unsigned long long)DMA_TLB_IIRG(type),
1387 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001388}
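
/*
 * Example (illustrative, not part of the original source): a page-selective
 * flush of 16 pages starting at IOVA 0x100000 for domain-id 5 would be
 * requested as
 *
 *	__iommu_flush_iotlb(iommu, 5, 0x100000, 4, DMA_TLB_PSI_FLUSH);
 *
 * The size_order of 4 encodes 2^4 pages in the address-mask field of the
 * IVA register (val_iva = size_order | addr above), and callers that want
 * the invalidation hint set pass it in bit 6 of the address.
 */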
1389
David Woodhouse64ae8922014-03-09 12:52:30 -07001390static struct device_domain_info *
 1391iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1392 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001393{
Yu Zhao93a23a72009-05-18 13:51:37 +08001394 struct device_domain_info *info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001395
Joerg Roedel55d94042015-07-22 16:50:40 +02001396 assert_spin_locked(&device_domain_lock);
1397
Yu Zhao93a23a72009-05-18 13:51:37 +08001398 if (!iommu->qi)
1399 return NULL;
1400
Yu Zhao93a23a72009-05-18 13:51:37 +08001401 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001402 if (info->iommu == iommu && info->bus == bus &&
1403 info->devfn == devfn) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001404 if (info->ats_supported && info->dev)
1405 return info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001406 break;
1407 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001408
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001409 return NULL;
Yu Zhao93a23a72009-05-18 13:51:37 +08001410}
1411
Omer Peleg0824c592016-04-20 19:03:35 +03001412static void domain_update_iotlb(struct dmar_domain *domain)
1413{
1414 struct device_domain_info *info;
1415 bool has_iotlb_device = false;
1416
1417 assert_spin_locked(&device_domain_lock);
1418
1419 list_for_each_entry(info, &domain->devices, link) {
1420 struct pci_dev *pdev;
1421
1422 if (!info->dev || !dev_is_pci(info->dev))
1423 continue;
1424
1425 pdev = to_pci_dev(info->dev);
1426 if (pdev->ats_enabled) {
1427 has_iotlb_device = true;
1428 break;
1429 }
1430 }
1431
1432 domain->has_iotlb_device = has_iotlb_device;
1433}
1434
Yu Zhao93a23a72009-05-18 13:51:37 +08001435static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1436{
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001437 struct pci_dev *pdev;
1438
Omer Peleg0824c592016-04-20 19:03:35 +03001439 assert_spin_locked(&device_domain_lock);
1440
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001441 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001442 return;
1443
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001444 pdev = to_pci_dev(info->dev);
Jacob Pan1c48db42018-06-07 09:57:00 -07001445	/* For an IOMMU that supports device IOTLB throttling (DIT), we assign
 1446	 * a PFSID to the invalidation descriptor of a VF so that the IOMMU HW
 1447	 * can gauge the queue depth at PF level. If DIT is not set, PFSID is
 1448	 * treated as reserved and should be set to 0.
1449 */
1450 if (!ecap_dit(info->iommu->ecap))
1451 info->pfsid = 0;
1452 else {
1453 struct pci_dev *pf_pdev;
1454
 1455		/* pci_physfn() returns pdev itself if the device is not a VF */
1456 pf_pdev = pci_physfn(pdev);
1457 info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
1458 }
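
	/*
	 * Example (illustrative): for a VF at 0000:03:10.2 whose PF is
	 * 0000:03:00.0, pci_physfn() returns the PF, so the PFSID becomes
	 * PCI_DEVID(0x03, 0x00) == 0x0300 and the hardware can account the
	 * invalidation against the PF's ATS queue depth.
	 */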
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001459
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001460#ifdef CONFIG_INTEL_IOMMU_SVM
1461 /* The PCIe spec, in its wisdom, declares that the behaviour of
1462 the device if you enable PASID support after ATS support is
1463 undefined. So always enable PASID support on devices which
1464 have it, even if we can't yet know if we're ever going to
1465 use it. */
1466 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1467 info->pasid_enabled = 1;
1468
1469 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1470 info->pri_enabled = 1;
1471#endif
1472 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1473 info->ats_enabled = 1;
Omer Peleg0824c592016-04-20 19:03:35 +03001474 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001475 info->ats_qdep = pci_ats_queue_depth(pdev);
1476 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001477}
1478
1479static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1480{
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001481 struct pci_dev *pdev;
1482
Omer Peleg0824c592016-04-20 19:03:35 +03001483 assert_spin_locked(&device_domain_lock);
1484
Jeremy McNicollda972fb2016-01-14 21:33:06 -08001485 if (!dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001486 return;
1487
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001488 pdev = to_pci_dev(info->dev);
1489
1490 if (info->ats_enabled) {
1491 pci_disable_ats(pdev);
1492 info->ats_enabled = 0;
Omer Peleg0824c592016-04-20 19:03:35 +03001493 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001494 }
1495#ifdef CONFIG_INTEL_IOMMU_SVM
1496 if (info->pri_enabled) {
1497 pci_disable_pri(pdev);
1498 info->pri_enabled = 0;
1499 }
1500 if (info->pasid_enabled) {
1501 pci_disable_pasid(pdev);
1502 info->pasid_enabled = 0;
1503 }
1504#endif
Yu Zhao93a23a72009-05-18 13:51:37 +08001505}
1506
1507static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1508 u64 addr, unsigned mask)
1509{
1510 u16 sid, qdep;
1511 unsigned long flags;
1512 struct device_domain_info *info;
1513
Omer Peleg0824c592016-04-20 19:03:35 +03001514 if (!domain->has_iotlb_device)
1515 return;
1516
Yu Zhao93a23a72009-05-18 13:51:37 +08001517 spin_lock_irqsave(&device_domain_lock, flags);
1518 list_for_each_entry(info, &domain->devices, link) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001519 if (!info->ats_enabled)
Yu Zhao93a23a72009-05-18 13:51:37 +08001520 continue;
1521
1522 sid = info->bus << 8 | info->devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001523 qdep = info->ats_qdep;
Jacob Pan1c48db42018-06-07 09:57:00 -07001524 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1525 qdep, addr, mask);
Yu Zhao93a23a72009-05-18 13:51:37 +08001526 }
1527 spin_unlock_irqrestore(&device_domain_lock, flags);
1528}
1529
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001530static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1531 struct dmar_domain *domain,
1532 unsigned long pfn, unsigned int pages,
1533 int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001534{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001535 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001536 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001537 u16 did = domain->iommu_did[iommu->seq_id];
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001539 BUG_ON(pages == 0);
1540
David Woodhouseea8ea462014-03-05 17:09:32 +00001541 if (ih)
1542 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001543 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001544	 * Fall back to a domain-selective flush if there is no PSI support
 1545	 * or the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001546	 * PSI requires the page size to be 2^x, with the base address
 1547	 * naturally aligned to that size.
1548 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001549 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1550 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001551 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001552 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001553 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001554 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001555
1556 /*
Nadav Amit82653632010-04-01 13:24:40 +03001557 * In caching mode, changes of pages from non-present to present require
1558 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001559 */
Nadav Amit82653632010-04-01 13:24:40 +03001560 if (!cap_caching_mode(iommu->cap) || !map)
Peter Xu9d2e6502018-01-10 13:51:37 +08001561 iommu_flush_dev_iotlb(domain, addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001562}
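
/*
 * Worked example (illustrative): for pages = 5 the mask becomes
 * ilog2(__roundup_pow_of_two(5)) = ilog2(8) = 3, i.e. an 8-page aligned
 * region is invalidated. If the IOMMU lacks page-selective invalidation
 * (cap_pgsel_inv() is clear) or mask exceeds cap_max_amask_val(), the code
 * above falls back to a domain-selective flush instead.
 */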
1563
Peter Xueed91a02018-05-04 10:34:52 +08001564/* Notification for newly created mappings */
1565static inline void __mapping_notify_one(struct intel_iommu *iommu,
1566 struct dmar_domain *domain,
1567 unsigned long pfn, unsigned int pages)
1568{
1569 /* It's a non-present to present mapping. Only flush if caching mode */
1570 if (cap_caching_mode(iommu->cap))
1571 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1572 else
1573 iommu_flush_write_buffer(iommu);
1574}
1575
Joerg Roedel13cf0172017-08-11 11:40:10 +02001576static void iommu_flush_iova(struct iova_domain *iovad)
1577{
1578 struct dmar_domain *domain;
1579 int idx;
1580
1581 domain = container_of(iovad, struct dmar_domain, iovad);
1582
1583 for_each_domain_iommu(idx, domain) {
1584 struct intel_iommu *iommu = g_iommus[idx];
1585 u16 did = domain->iommu_did[iommu->seq_id];
1586
1587 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1588
1589 if (!cap_caching_mode(iommu->cap))
1590 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1591 0, MAX_AGAW_PFN_WIDTH);
1592 }
1593}
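
/*
 * iommu_flush_iova() and iova_entry_free() above are the callbacks handed
 * to init_iova_flush_queue() in domain_init() below, so a deferred IOVA
 * free triggers a domain-selective IOTLB invalidation (plus a full
 * device-IOTLB flush when caching mode is off) before dma_free_pagelist()
 * releases the page-table pages.
 */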
1594
mark grossf8bab732008-02-08 04:18:38 -08001595static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1596{
1597 u32 pmen;
1598 unsigned long flags;
1599
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001600 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001601 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1602 pmen &= ~DMA_PMEN_EPM;
1603 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1604
1605 /* wait for the protected region status bit to clear */
1606 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1607 readl, !(pmen & DMA_PMEN_PRS), pmen);
1608
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001609 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001610}
1611
Jiang Liu2a41cce2014-07-11 14:19:33 +08001612static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001613{
1614 u32 sts;
1615 unsigned long flags;
1616
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001617 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001618 iommu->gcmd |= DMA_GCMD_TE;
1619 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001620
 1621	/* Make sure hardware completes it */
1622 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001623 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001624
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001625 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001626}
1627
Jiang Liu2a41cce2014-07-11 14:19:33 +08001628static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001629{
1630 u32 sts;
1631 unsigned long flag;
1632
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001633 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001634 iommu->gcmd &= ~DMA_GCMD_TE;
1635 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1636
 1637	/* Make sure hardware completes it */
1638 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001639 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001641 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001642}
1643
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001644
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645static int iommu_init_domains(struct intel_iommu *iommu)
1646{
Joerg Roedel8bf47812015-07-21 10:41:21 +02001647 u32 ndomains, nlongs;
1648 size_t size;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001649
1650 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001651 pr_debug("%s: Number of Domains supported <%d>\n",
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001652 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001653 nlongs = BITS_TO_LONGS(ndomains);
1654
Donald Dutile94a91b502009-08-20 16:51:34 -04001655 spin_lock_init(&iommu->lock);
1656
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001657 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1658 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001659 pr_err("%s: Allocating domain id array failed\n",
1660 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001661 return -ENOMEM;
1662 }
Joerg Roedel8bf47812015-07-21 10:41:21 +02001663
Wei Yang86f004c2016-05-21 02:41:51 +00001664 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001665 iommu->domains = kzalloc(size, GFP_KERNEL);
1666
1667 if (iommu->domains) {
1668 size = 256 * sizeof(struct dmar_domain *);
1669 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1670 }
1671
1672 if (!iommu->domains || !iommu->domains[0]) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001673 pr_err("%s: Allocating domain array failed\n",
1674 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001675 kfree(iommu->domain_ids);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001676 kfree(iommu->domains);
Jiang Liu852bdb02014-01-06 14:18:11 +08001677 iommu->domain_ids = NULL;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001678 iommu->domains = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001679 return -ENOMEM;
1680 }
1681
Joerg Roedel8bf47812015-07-21 10:41:21 +02001682
1683
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001684 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001685 * If Caching mode is set, then invalid translations are tagged
1686 * with domain-id 0, hence we need to pre-allocate it. We also
1687 * use domain-id 0 as a marker for non-allocated domain-id, so
1688 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001689 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001690 set_bit(0, iommu->domain_ids);
1691
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001692 return 0;
1693}
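
/*
 * Sketch (based on the chunked allocation above): iommu->domains is a
 * two-level array of 256-entry chunks, so a domain-id 'did' is looked up
 * roughly as iommu->domains[did >> 8][did & 0xff]. The first chunk is
 * allocated eagerly here since domain-id 0 is always reserved.
 */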
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001694
Jiang Liuffebeb42014-11-09 22:48:02 +08001695static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001696{
Joerg Roedel29a27712015-07-21 17:17:12 +02001697 struct device_domain_info *info, *tmp;
Joerg Roedel55d94042015-07-22 16:50:40 +02001698 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699
Joerg Roedel29a27712015-07-21 17:17:12 +02001700 if (!iommu->domains || !iommu->domain_ids)
1701 return;
Jiang Liua4eaa862014-02-19 14:07:30 +08001702
Joerg Roedelbea64032016-11-08 15:08:26 +01001703again:
Joerg Roedel55d94042015-07-22 16:50:40 +02001704 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001705 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1706 struct dmar_domain *domain;
1707
1708 if (info->iommu != iommu)
1709 continue;
1710
1711 if (!info->dev || !info->domain)
1712 continue;
1713
1714 domain = info->domain;
1715
Joerg Roedelbea64032016-11-08 15:08:26 +01001716 __dmar_remove_one_dev_info(info);
Joerg Roedel29a27712015-07-21 17:17:12 +02001717
Joerg Roedelbea64032016-11-08 15:08:26 +01001718 if (!domain_type_is_vm_or_si(domain)) {
1719 /*
1720 * The domain_exit() function can't be called under
1721 * device_domain_lock, as it takes this lock itself.
1722 * So release the lock here and re-run the loop
1723 * afterwards.
1724 */
1725 spin_unlock_irqrestore(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001726 domain_exit(domain);
Joerg Roedelbea64032016-11-08 15:08:26 +01001727 goto again;
1728 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729 }
Joerg Roedel55d94042015-07-22 16:50:40 +02001730 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001731
1732 if (iommu->gcmd & DMA_GCMD_TE)
1733 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001734}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001735
Jiang Liuffebeb42014-11-09 22:48:02 +08001736static void free_dmar_iommu(struct intel_iommu *iommu)
1737{
1738 if ((iommu->domains) && (iommu->domain_ids)) {
Wei Yang86f004c2016-05-21 02:41:51 +00001739 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001740 int i;
1741
1742 for (i = 0; i < elems; i++)
1743 kfree(iommu->domains[i]);
Jiang Liuffebeb42014-11-09 22:48:02 +08001744 kfree(iommu->domains);
1745 kfree(iommu->domain_ids);
1746 iommu->domains = NULL;
1747 iommu->domain_ids = NULL;
1748 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001749
Weidong Hand9630fe2008-12-08 11:06:32 +08001750 g_iommus[iommu->seq_id] = NULL;
1751
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001752 /* free context mapping */
1753 free_context_table(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001754
1755#ifdef CONFIG_INTEL_IOMMU_SVM
David Woodhousea222a7f2015-10-07 23:35:18 +01001756 if (pasid_enabled(iommu)) {
1757 if (ecap_prs(iommu->ecap))
1758 intel_svm_finish_prq(iommu);
Lu Baolud9737952018-07-14 15:47:02 +08001759 intel_svm_exit(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01001760 }
David Woodhouse8a94ade2015-03-24 14:54:56 +00001761#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001762}
1763
Jiang Liuab8dfe22014-07-11 14:19:27 +08001764static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001765{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001766 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001767
1768 domain = alloc_domain_mem();
1769 if (!domain)
1770 return NULL;
1771
Jiang Liuab8dfe22014-07-11 14:19:27 +08001772 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001773 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001774 domain->flags = flags;
Omer Peleg0824c592016-04-20 19:03:35 +03001775 domain->has_iotlb_device = false;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001776 INIT_LIST_HEAD(&domain->devices);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001777
1778 return domain;
1779}
1780
Joerg Roedeld160aca2015-07-22 11:52:53 +02001781/* Must be called with iommu->lock */
1782static int domain_attach_iommu(struct dmar_domain *domain,
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001783 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001784{
Jiang Liu44bde612014-07-11 14:19:29 +08001785 unsigned long ndomains;
Joerg Roedel55d94042015-07-22 16:50:40 +02001786 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001787
Joerg Roedel55d94042015-07-22 16:50:40 +02001788 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001789 assert_spin_locked(&iommu->lock);
Jiang Liu44bde612014-07-11 14:19:29 +08001790
Joerg Roedel29a27712015-07-21 17:17:12 +02001791 domain->iommu_refcnt[iommu->seq_id] += 1;
1792 domain->iommu_count += 1;
1793 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
Jiang Liufb170fb2014-07-11 14:19:28 +08001794 ndomains = cap_ndoms(iommu->cap);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001795 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1796
1797 if (num >= ndomains) {
1798 pr_err("%s: No free domain ids\n", iommu->name);
1799 domain->iommu_refcnt[iommu->seq_id] -= 1;
1800 domain->iommu_count -= 1;
Joerg Roedel55d94042015-07-22 16:50:40 +02001801 return -ENOSPC;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001802 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001803
Joerg Roedeld160aca2015-07-22 11:52:53 +02001804 set_bit(num, iommu->domain_ids);
1805 set_iommu_domain(iommu, num, domain);
Jiang Liufb170fb2014-07-11 14:19:28 +08001806
Joerg Roedeld160aca2015-07-22 11:52:53 +02001807 domain->iommu_did[iommu->seq_id] = num;
1808 domain->nid = iommu->node;
1809
Jiang Liufb170fb2014-07-11 14:19:28 +08001810 domain_update_iommu_cap(domain);
1811 }
Joerg Roedeld160aca2015-07-22 11:52:53 +02001812
Joerg Roedel55d94042015-07-22 16:50:40 +02001813 return 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001814}
1815
1816static int domain_detach_iommu(struct dmar_domain *domain,
1817 struct intel_iommu *iommu)
1818{
Joerg Roedeld160aca2015-07-22 11:52:53 +02001819 int num, count = INT_MAX;
Jiang Liufb170fb2014-07-11 14:19:28 +08001820
Joerg Roedel55d94042015-07-22 16:50:40 +02001821 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001822 assert_spin_locked(&iommu->lock);
Jiang Liufb170fb2014-07-11 14:19:28 +08001823
Joerg Roedel29a27712015-07-21 17:17:12 +02001824 domain->iommu_refcnt[iommu->seq_id] -= 1;
1825 count = --domain->iommu_count;
1826 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02001827 num = domain->iommu_did[iommu->seq_id];
1828 clear_bit(num, iommu->domain_ids);
1829 set_iommu_domain(iommu, num, NULL);
1830
Jiang Liufb170fb2014-07-11 14:19:28 +08001831 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001832 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001833 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001834
1835 return count;
1836}
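
/*
 * Example (illustrative): attaching two devices behind the same IOMMU to
 * one domain calls domain_attach_iommu() twice; the hardware domain-id in
 * iommu->domain_ids is allocated only on the first call, while
 * iommu_refcnt[iommu->seq_id] counts up to 2. domain_detach_iommu()
 * releases the id again only when that per-IOMMU refcount drops to zero.
 */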
1837
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001838static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001839static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001840
Joseph Cihula51a63e62011-03-21 11:04:24 -07001841static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001842{
1843 struct pci_dev *pdev = NULL;
1844 struct iova *iova;
1845 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001846
Zhen Leiaa3ac942017-09-21 16:52:45 +01001847 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001848
Mark Gross8a443df2008-03-04 14:59:31 -08001849 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1850 &reserved_rbtree_key);
1851
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001852 /* IOAPIC ranges shouldn't be accessed by DMA */
1853 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1854 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001855 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001856 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001857 return -ENODEV;
1858 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001859
1860 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1861 for_each_pci_dev(pdev) {
1862 struct resource *r;
1863
1864 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1865 r = &pdev->resource[i];
1866 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1867 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001868 iova = reserve_iova(&reserved_iova_list,
1869 IOVA_PFN(r->start),
1870 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001871 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001872 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001873 return -ENODEV;
1874 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001875 }
1876 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001877 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001878}
1879
1880static void domain_reserve_special_ranges(struct dmar_domain *domain)
1881{
1882 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1883}
1884
1885static inline int guestwidth_to_adjustwidth(int gaw)
1886{
1887 int agaw;
1888 int r = (gaw - 12) % 9;
1889
1890 if (r == 0)
1891 agaw = gaw;
1892 else
1893 agaw = gaw + 9 - r;
1894 if (agaw > 64)
1895 agaw = 64;
1896 return agaw;
1897}
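
/*
 * Worked examples (illustrative): the adjusted width is the guest width
 * rounded up so that (agaw - 12) is a multiple of 9, i.e. a whole number
 * of 9-bit page-table levels above the 12-bit page offset:
 *
 *	gaw = 48: r = (48 - 12) % 9 = 0  ->  agaw = 48
 *	gaw = 40: r = (40 - 12) % 9 = 1  ->  agaw = 40 + 9 - 1 = 48
 *	gaw = 64: r = (64 - 12) % 9 = 7  ->  66, clamped to 64
 */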
1898
Joerg Roedeldc534b22015-07-22 12:44:02 +02001899static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1900 int guest_width)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001901{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001902 int adjust_width, agaw;
1903 unsigned long sagaw;
Joerg Roedel13cf0172017-08-11 11:40:10 +02001904 int err;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001905
Zhen Leiaa3ac942017-09-21 16:52:45 +01001906 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
Joerg Roedel13cf0172017-08-11 11:40:10 +02001907
1908 err = init_iova_flush_queue(&domain->iovad,
1909 iommu_flush_iova, iova_entry_free);
1910 if (err)
1911 return err;
1912
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001913 domain_reserve_special_ranges(domain);
1914
1915 /* calculate AGAW */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001916 if (guest_width > cap_mgaw(iommu->cap))
1917 guest_width = cap_mgaw(iommu->cap);
1918 domain->gaw = guest_width;
1919 adjust_width = guestwidth_to_adjustwidth(guest_width);
1920 agaw = width_to_agaw(adjust_width);
1921 sagaw = cap_sagaw(iommu->cap);
1922 if (!test_bit(agaw, &sagaw)) {
1923 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001924 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001925 agaw = find_next_bit(&sagaw, 5, agaw);
1926 if (agaw >= 5)
1927 return -ENODEV;
1928 }
1929 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001930
Weidong Han8e6040972008-12-08 15:49:06 +08001931 if (ecap_coherent(iommu->ecap))
1932 domain->iommu_coherency = 1;
1933 else
1934 domain->iommu_coherency = 0;
1935
Sheng Yang58c610b2009-03-18 15:33:05 +08001936 if (ecap_sc_support(iommu->ecap))
1937 domain->iommu_snooping = 1;
1938 else
1939 domain->iommu_snooping = 0;
1940
David Woodhouse214e39a2014-03-19 10:38:49 +00001941 if (intel_iommu_superpage)
1942 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1943 else
1944 domain->iommu_superpage = 0;
1945
Suresh Siddha4c923d42009-10-02 11:01:24 -07001946 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001947
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001948 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001949 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001950 if (!domain->pgd)
1951 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001952 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001953 return 0;
1954}
1955
1956static void domain_exit(struct dmar_domain *domain)
1957{
David Woodhouseea8ea462014-03-05 17:09:32 +00001958 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001959
 1960	/* Domain 0 is reserved, so don't process it */
1961 if (!domain)
1962 return;
1963
Joerg Roedeld160aca2015-07-22 11:52:53 +02001964 /* Remove associated devices and clear attached or cached domains */
1965 rcu_read_lock();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001966 domain_remove_dev_info(domain);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001967 rcu_read_unlock();
Jiang Liu92d03cc2014-02-19 14:07:28 +08001968
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001969 /* destroy iovas */
1970 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001971
David Woodhouseea8ea462014-03-05 17:09:32 +00001972 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001973
David Woodhouseea8ea462014-03-05 17:09:32 +00001974 dma_free_pagelist(freelist);
1975
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001976 free_domain_mem(domain);
1977}
1978
David Woodhouse64ae8922014-03-09 12:52:30 -07001979static int domain_context_mapping_one(struct dmar_domain *domain,
1980 struct intel_iommu *iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02001981 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001982{
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02001983 u16 did = domain->iommu_did[iommu->seq_id];
Joerg Roedel28ccce02015-07-21 14:45:31 +02001984 int translation = CONTEXT_TT_MULTI_LEVEL;
1985 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001986 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001987 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001988 struct dma_pte *pgd;
Joerg Roedel55d94042015-07-22 16:50:40 +02001989 int ret, agaw;
Joerg Roedel28ccce02015-07-21 14:45:31 +02001990
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02001991 WARN_ON(did == 0);
1992
Joerg Roedel28ccce02015-07-21 14:45:31 +02001993 if (hw_pass_through && domain_type_is_si(domain))
1994 translation = CONTEXT_TT_PASS_THROUGH;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001995
1996 pr_debug("Set context mapping for %02x:%02x.%d\n",
1997 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001998
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001999 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08002000
Joerg Roedel55d94042015-07-22 16:50:40 +02002001 spin_lock_irqsave(&device_domain_lock, flags);
2002 spin_lock(&iommu->lock);
2003
2004 ret = -ENOMEM;
David Woodhouse03ecc322015-02-13 14:35:21 +00002005 context = iommu_context_addr(iommu, bus, devfn, 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002006 if (!context)
Joerg Roedel55d94042015-07-22 16:50:40 +02002007 goto out_unlock;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002008
Joerg Roedel55d94042015-07-22 16:50:40 +02002009 ret = 0;
2010 if (context_present(context))
2011 goto out_unlock;
Joerg Roedelcf484d02015-06-12 12:21:46 +02002012
Xunlei Pangaec0e862016-12-05 20:09:07 +08002013 /*
2014 * For kdump cases, old valid entries may be cached due to the
2015 * in-flight DMA and copied pgtable, but there is no unmapping
2016 * behaviour for them, thus we need an explicit cache flush for
2017 * the newly-mapped device. For kdump, at this point, the device
2018 * is supposed to finish reset at its driver probe stage, so no
2019 * in-flight DMA will exist, and we don't need to worry anymore
2020 * hereafter.
2021 */
2022 if (context_copied(context)) {
2023 u16 did_old = context_domain_id(context);
2024
Christos Gkekasb117e032017-10-08 23:33:31 +01002025 if (did_old < cap_ndoms(iommu->cap)) {
Xunlei Pangaec0e862016-12-05 20:09:07 +08002026 iommu->flush.flush_context(iommu, did_old,
2027 (((u16)bus) << 8) | devfn,
2028 DMA_CCMD_MASK_NOBIT,
2029 DMA_CCMD_DEVICE_INVL);
KarimAllah Ahmedf73a7ee2017-05-05 11:39:59 -07002030 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2031 DMA_TLB_DSI_FLUSH);
2032 }
Xunlei Pangaec0e862016-12-05 20:09:07 +08002033 }
2034
Weidong Hanea6606b2008-12-08 23:08:15 +08002035 pgd = domain->pgd;
2036
Joerg Roedelde24e552015-07-21 14:53:04 +02002037 context_clear_entry(context);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002038 context_set_domain_id(context, did);
Weidong Hanea6606b2008-12-08 23:08:15 +08002039
Joerg Roedelde24e552015-07-21 14:53:04 +02002040 /*
 2041	 * Skip top levels of page tables for an iommu which has a smaller
 2042	 * agaw than the default. Unnecessary for PT mode.
2043 */
Yu Zhao93a23a72009-05-18 13:51:37 +08002044 if (translation != CONTEXT_TT_PASS_THROUGH) {
Joerg Roedelde24e552015-07-21 14:53:04 +02002045 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
Joerg Roedel55d94042015-07-22 16:50:40 +02002046 ret = -ENOMEM;
Joerg Roedelde24e552015-07-21 14:53:04 +02002047 pgd = phys_to_virt(dma_pte_addr(pgd));
Joerg Roedel55d94042015-07-22 16:50:40 +02002048 if (!dma_pte_present(pgd))
2049 goto out_unlock;
Joerg Roedelde24e552015-07-21 14:53:04 +02002050 }
2051
David Woodhouse64ae8922014-03-09 12:52:30 -07002052 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002053 if (info && info->ats_supported)
2054 translation = CONTEXT_TT_DEV_IOTLB;
2055 else
2056 translation = CONTEXT_TT_MULTI_LEVEL;
Joerg Roedelde24e552015-07-21 14:53:04 +02002057
Yu Zhao93a23a72009-05-18 13:51:37 +08002058 context_set_address_root(context, virt_to_phys(pgd));
2059 context_set_address_width(context, iommu->agaw);
Joerg Roedelde24e552015-07-21 14:53:04 +02002060 } else {
2061 /*
2062 * In pass through mode, AW must be programmed to
2063 * indicate the largest AGAW value supported by
2064 * hardware. And ASR is ignored by hardware.
2065 */
2066 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08002067 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002068
2069 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00002070 context_set_fault_enable(context);
2071 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08002072 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002073
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002074 /*
2075 * It's a non-present to present mapping. If hardware doesn't cache
 2076	 * non-present entries we only need to flush the write-buffer. If it
 2077	 * _does_ cache non-present entries, then it does so in the special
2078 * domain #0, which we have to flush:
2079 */
2080 if (cap_caching_mode(iommu->cap)) {
2081 iommu->flush.flush_context(iommu, 0,
2082 (((u16)bus) << 8) | devfn,
2083 DMA_CCMD_MASK_NOBIT,
2084 DMA_CCMD_DEVICE_INVL);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002085 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002086 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002087 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002088 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002089 iommu_enable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08002090
Joerg Roedel55d94042015-07-22 16:50:40 +02002091 ret = 0;
2092
2093out_unlock:
2094 spin_unlock(&iommu->lock);
2095 spin_unlock_irqrestore(&device_domain_lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08002096
Wei Yang5c365d12016-07-13 13:53:21 +00002097 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002098}
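
/*
 * Summary (descriptive): on success, the context entry for (bus, devfn)
 * carries the domain-id, the physical root of the domain's page table
 * (skipped entirely in pass-through mode), the address width, and a
 * translation type of multi-level, dev-IOTLB, or pass-through. The
 * trailing context-cache/IOTLB flush (or write-buffer flush when the
 * hardware does not cache non-present entries) makes the new entry
 * visible to hardware.
 */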
2099
Alex Williamson579305f2014-07-03 09:51:43 -06002100struct domain_context_mapping_data {
2101 struct dmar_domain *domain;
2102 struct intel_iommu *iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002103};
2104
2105static int domain_context_mapping_cb(struct pci_dev *pdev,
2106 u16 alias, void *opaque)
2107{
2108 struct domain_context_mapping_data *data = opaque;
2109
2110 return domain_context_mapping_one(data->domain, data->iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002111 PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06002112}
2113
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002114static int
Joerg Roedel28ccce02015-07-21 14:45:31 +02002115domain_context_mapping(struct dmar_domain *domain, struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002116{
David Woodhouse64ae8922014-03-09 12:52:30 -07002117 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002118 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06002119 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002120
David Woodhousee1f167f2014-03-09 15:24:46 -07002121 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002122 if (!iommu)
2123 return -ENODEV;
2124
Alex Williamson579305f2014-07-03 09:51:43 -06002125 if (!dev_is_pci(dev))
Joerg Roedel28ccce02015-07-21 14:45:31 +02002126 return domain_context_mapping_one(domain, iommu, bus, devfn);
Alex Williamson579305f2014-07-03 09:51:43 -06002127
2128 data.domain = domain;
2129 data.iommu = iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002130
2131 return pci_for_each_dma_alias(to_pci_dev(dev),
2132 &domain_context_mapping_cb, &data);
2133}
2134
2135static int domain_context_mapped_cb(struct pci_dev *pdev,
2136 u16 alias, void *opaque)
2137{
2138 struct intel_iommu *iommu = opaque;
2139
2140 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002141}
2142
David Woodhousee1f167f2014-03-09 15:24:46 -07002143static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002144{
Weidong Han5331fe62008-12-08 23:00:00 +08002145 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002146 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002147
David Woodhousee1f167f2014-03-09 15:24:46 -07002148 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002149 if (!iommu)
2150 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002151
Alex Williamson579305f2014-07-03 09:51:43 -06002152 if (!dev_is_pci(dev))
2153 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002154
Alex Williamson579305f2014-07-03 09:51:43 -06002155 return !pci_for_each_dma_alias(to_pci_dev(dev),
2156 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002157}
2158
Fenghua Yuf5329592009-08-04 15:09:37 -07002159/* Returns a number of VTD pages, but aligned to MM page size */
2160static inline unsigned long aligned_nrpages(unsigned long host_addr,
2161 size_t size)
2162{
2163 host_addr &= ~PAGE_MASK;
2164 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2165}
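
/*
 * Worked example (illustrative, assuming 4KiB MM pages): host_addr = 0x1234
 * and size = 0x2000 give an in-page offset of 0x234, and
 * PAGE_ALIGN(0x234 + 0x2000) = 0x3000, so the mapping spans 3 VT-d pages
 * even though the raw size only covers 2 pages.
 */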
2166
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002167/* Return largest possible superpage level for a given mapping */
2168static inline int hardware_largepage_caps(struct dmar_domain *domain,
2169 unsigned long iov_pfn,
2170 unsigned long phy_pfn,
2171 unsigned long pages)
2172{
2173 int support, level = 1;
2174 unsigned long pfnmerge;
2175
2176 support = domain->iommu_superpage;
2177
2178 /* To use a large page, the virtual *and* physical addresses
2179 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2180 of them will mean we have to use smaller pages. So just
2181 merge them and check both at once. */
2182 pfnmerge = iov_pfn | phy_pfn;
2183
2184 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2185 pages >>= VTD_STRIDE_SHIFT;
2186 if (!pages)
2187 break;
2188 pfnmerge >>= VTD_STRIDE_SHIFT;
2189 level++;
2190 support--;
2191 }
2192 return level;
2193}
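
/*
 * Worked example (illustrative, VTD_STRIDE_SHIFT == 9): with
 * domain->iommu_superpage == 2, iov_pfn == 0x200, phy_pfn == 0x400 and
 * pages == 512, pfnmerge == 0x600 has its low 9 bits clear, so one level
 * is climbed (level == 2, a 2MiB superpage); the shifted pfnmerge of 0x3
 * is no longer stride-aligned, so the loop stops there.
 */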
2194
David Woodhouse9051aa02009-06-29 12:30:54 +01002195static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2196 struct scatterlist *sg, unsigned long phys_pfn,
2197 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002198{
2199 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002200 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002201 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002202 unsigned int largepage_lvl = 0;
2203 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002204
Jiang Liu162d1b12014-07-11 14:19:35 +08002205 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002206
2207 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2208 return -EINVAL;
2209
2210 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2211
Jiang Liucc4f14a2014-11-26 09:42:10 +08002212 if (!sg) {
2213 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002214 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2215 }
2216
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002217 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002218 uint64_t tmp;
2219
David Woodhousee1605492009-06-29 11:17:38 +01002220 if (!sg_res) {
Robin Murphy29a90b72017-09-28 15:14:01 +01002221 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2222
Fenghua Yuf5329592009-08-04 15:09:37 -07002223 sg_res = aligned_nrpages(sg->offset, sg->length);
Robin Murphy29a90b72017-09-28 15:14:01 +01002224 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
David Woodhousee1605492009-06-29 11:17:38 +01002225 sg->dma_length = sg->length;
Robin Murphy29a90b72017-09-28 15:14:01 +01002226 pteval = (sg_phys(sg) - pgoff) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002227 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002228 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002229
David Woodhousee1605492009-06-29 11:17:38 +01002230 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002231 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2232
David Woodhouse5cf0a762014-03-19 16:07:49 +00002233 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002234 if (!pte)
2235 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002236			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002237 if (largepage_lvl > 1) {
Christian Zanderba2374f2015-06-10 09:41:45 -07002238 unsigned long nr_superpages, end_pfn;
2239
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002240 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002241 lvl_pages = lvl_to_nr_pages(largepage_lvl);
Christian Zanderba2374f2015-06-10 09:41:45 -07002242
2243 nr_superpages = sg_res / lvl_pages;
2244 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2245
Jiang Liud41a4ad2014-07-11 14:19:34 +08002246 /*
2247 * Ensure that old small page tables are
Christian Zanderba2374f2015-06-10 09:41:45 -07002248 * removed to make room for superpage(s).
David Dillowbc24c572017-06-28 19:42:23 -07002249 * We're adding new large pages, so make sure
2250 * we don't remove their parent tables.
Jiang Liud41a4ad2014-07-11 14:19:34 +08002251 */
David Dillowbc24c572017-06-28 19:42:23 -07002252 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2253 largepage_lvl + 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002254 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002255 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002256 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002257
David Woodhousee1605492009-06-29 11:17:38 +01002258 }
 2259		/* We don't need a lock here; nobody else
2260 * touches the iova range
2261 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002262 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002263 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002264 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002265 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2266 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002267 if (dumps) {
2268 dumps--;
2269 debug_dma_dump_mappings(NULL);
2270 }
2271 WARN_ON(1);
2272 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002273
2274 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2275
2276 BUG_ON(nr_pages < lvl_pages);
2277 BUG_ON(sg_res < lvl_pages);
2278
2279 nr_pages -= lvl_pages;
2280 iov_pfn += lvl_pages;
2281 phys_pfn += lvl_pages;
2282 pteval += lvl_pages * VTD_PAGE_SIZE;
2283 sg_res -= lvl_pages;
2284
2285 /* If the next PTE would be the first in a new page, then we
2286 need to flush the cache on the entries we've just written.
2287 And then we'll need to recalculate 'pte', so clear it and
2288 let it get set again in the if (!pte) block above.
2289
2290 If we're done (!nr_pages) we need to flush the cache too.
2291
2292 Also if we've been setting superpages, we may need to
2293 recalculate 'pte' and switch back to smaller pages for the
2294 end of the mapping, if the trailing size is not enough to
2295 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002296 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002297 if (!nr_pages || first_pte_in_page(pte) ||
2298 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002299 domain_flush_cache(domain, first_pte,
2300 (void *)pte - (void *)first_pte);
2301 pte = NULL;
2302 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002303
2304 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002305 sg = sg_next(sg);
2306 }
2307 return 0;
2308}
2309
Peter Xu87684fd2018-05-04 10:34:53 +08002310static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2311 struct scatterlist *sg, unsigned long phys_pfn,
2312 unsigned long nr_pages, int prot)
2313{
2314 int ret;
2315 struct intel_iommu *iommu;
2316
2317 /* Do the real mapping first */
2318 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2319 if (ret)
2320 return ret;
2321
2322 /* Notify about the new mapping */
2323 if (domain_type_is_vm(domain)) {
2324 /* VM typed domains can have more than one IOMMUs */
2325 int iommu_id;
2326 for_each_domain_iommu(iommu_id, domain) {
2327 iommu = g_iommus[iommu_id];
2328 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2329 }
2330 } else {
2331 /* General domains only have one IOMMU */
2332 iommu = domain_get_iommu(domain);
2333 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2334 }
2335
2336 return 0;
2337}
2338
David Woodhouse9051aa02009-06-29 12:30:54 +01002339static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2340 struct scatterlist *sg, unsigned long nr_pages,
2341 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002342{
Peter Xu87684fd2018-05-04 10:34:53 +08002343 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
David Woodhouse9051aa02009-06-29 12:30:54 +01002344}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002345
David Woodhouse9051aa02009-06-29 12:30:54 +01002346static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2347 unsigned long phys_pfn, unsigned long nr_pages,
2348 int prot)
2349{
Peter Xu87684fd2018-05-04 10:34:53 +08002350 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002351}
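
/*
 * Example (illustrative): identity-mapping 16 pages starting at pfn 0x12340
 * with read/write permission would be requested as
 *
 *	domain_pfn_mapping(domain, 0x12340, 0x12340, 16,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which routes through domain_mapping() -> __domain_mapping() with a NULL
 * scatterlist and then notifies the IOMMU(s) of the new mapping.
 */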
2352
Joerg Roedel2452d9d2015-07-23 16:20:14 +02002353static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002354{
Filippo Sironi50822192017-08-31 10:58:11 +02002355 unsigned long flags;
2356 struct context_entry *context;
2357 u16 did_old;
2358
Weidong Hanc7151a82008-12-08 22:51:37 +08002359 if (!iommu)
2360 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002361
Filippo Sironi50822192017-08-31 10:58:11 +02002362 spin_lock_irqsave(&iommu->lock, flags);
2363 context = iommu_context_addr(iommu, bus, devfn, 0);
2364 if (!context) {
2365 spin_unlock_irqrestore(&iommu->lock, flags);
2366 return;
2367 }
2368 did_old = context_domain_id(context);
2369 context_clear_entry(context);
2370 __iommu_flush_cache(iommu, context, sizeof(*context));
2371 spin_unlock_irqrestore(&iommu->lock, flags);
2372 iommu->flush.flush_context(iommu,
2373 did_old,
2374 (((u16)bus) << 8) | devfn,
2375 DMA_CCMD_MASK_NOBIT,
2376 DMA_CCMD_DEVICE_INVL);
2377 iommu->flush.flush_iotlb(iommu,
2378 did_old,
2379 0,
2380 0,
2381 DMA_TLB_DSI_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002382}
2383
David Woodhouse109b9b02012-05-25 17:43:02 +01002384static inline void unlink_domain_info(struct device_domain_info *info)
2385{
2386 assert_spin_locked(&device_domain_lock);
2387 list_del(&info->link);
2388 list_del(&info->global);
2389 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002390 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002391}
2392
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002393static void domain_remove_dev_info(struct dmar_domain *domain)
2394{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002395 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002396 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002397
2398 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel76f45fe2015-07-21 18:25:11 +02002399 list_for_each_entry_safe(info, tmp, &domain->devices, link)
Joerg Roedel127c7612015-07-23 17:44:46 +02002400 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002401 spin_unlock_irqrestore(&device_domain_lock, flags);
2402}
2403
2404/*
2405 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002406 * Note: we use struct device->archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002407 */
David Woodhouse1525a292014-03-06 16:19:30 +00002408static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002409{
2410 struct device_domain_info *info;
2411
2412 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002413 info = dev->archdata.iommu;
Peter Xub316d022017-05-22 18:28:51 +08002414 if (likely(info))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002415 return info->domain;
2416 return NULL;
2417}
2418
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002419static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002420dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2421{
2422 struct device_domain_info *info;
2423
2424 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002425 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002426 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002427 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002428
2429 return NULL;
2430}
2431
Joerg Roedel5db31562015-07-22 12:40:43 +02002432static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2433 int bus, int devfn,
2434 struct device *dev,
2435 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002436{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002437 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002438 struct device_domain_info *info;
2439 unsigned long flags;
Joerg Roedeld160aca2015-07-22 11:52:53 +02002440 int ret;
Jiang Liu745f2582014-02-19 14:07:26 +08002441
2442 info = alloc_devinfo_mem();
2443 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002444 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002445
Jiang Liu745f2582014-02-19 14:07:26 +08002446 info->bus = bus;
2447 info->devfn = devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002448 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2449 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2450 info->ats_qdep = 0;
Jiang Liu745f2582014-02-19 14:07:26 +08002451 info->dev = dev;
2452 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002453 info->iommu = iommu;
Lu Baolucc580e42018-07-14 15:46:59 +08002454 info->pasid_table = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002455
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002456 if (dev && dev_is_pci(dev)) {
2457 struct pci_dev *pdev = to_pci_dev(info->dev);
2458
Gil Kupfercef74402018-05-10 17:56:02 -05002459 if (!pci_ats_disabled() &&
2460 ecap_dev_iotlb_support(iommu->ecap) &&
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002461 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2462 dmar_find_matched_atsr_unit(pdev))
2463 info->ats_supported = 1;
2464
2465 if (ecs_enabled(iommu)) {
2466 if (pasid_enabled(iommu)) {
2467 int features = pci_pasid_features(pdev);
2468 if (features >= 0)
2469 info->pasid_supported = features | 1;
2470 }
2471
2472 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2473 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2474 info->pri_supported = 1;
2475 }
2476 }
2477
Jiang Liu745f2582014-02-19 14:07:26 +08002478 spin_lock_irqsave(&device_domain_lock, flags);
2479 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002480 found = find_domain(dev);
Joerg Roedelf303e502015-07-23 18:37:13 +02002481
2482 if (!found) {
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002483 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002484 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
Joerg Roedelf303e502015-07-23 18:37:13 +02002485 if (info2) {
2486 found = info2->domain;
2487 info2->dev = dev;
2488 }
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002489 }
Joerg Roedelf303e502015-07-23 18:37:13 +02002490
Jiang Liu745f2582014-02-19 14:07:26 +08002491 if (found) {
2492 spin_unlock_irqrestore(&device_domain_lock, flags);
2493 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002494 /* Caller must free the original domain */
2495 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002496 }
2497
Joerg Roedeld160aca2015-07-22 11:52:53 +02002498 spin_lock(&iommu->lock);
2499 ret = domain_attach_iommu(domain, iommu);
2500 spin_unlock(&iommu->lock);
2501
2502 if (ret) {
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002503 spin_unlock_irqrestore(&device_domain_lock, flags);
Sudip Mukherjee499f3aa2015-09-18 16:27:07 +05302504 free_devinfo_mem(info);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002505 return NULL;
2506 }
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002507
David Woodhouseb718cd32014-03-09 13:11:33 -07002508 list_add(&info->link, &domain->devices);
2509 list_add(&info->global, &device_domain_list);
2510 if (dev)
2511 dev->archdata.iommu = info;
Lu Baolua7fc93f2018-07-14 15:47:00 +08002512
2513 if (dev && dev_is_pci(dev) && info->pasid_supported) {
2514 ret = intel_pasid_alloc_table(dev);
2515 if (ret) {
2516 __dmar_remove_one_dev_info(info);
2517 spin_unlock_irqrestore(&device_domain_lock, flags);
2518 return NULL;
2519 }
2520 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002521 spin_unlock_irqrestore(&device_domain_lock, flags);
2522
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002523 if (dev && domain_context_mapping(domain, dev)) {
2524 pr_err("Domain context map for %s failed\n", dev_name(dev));
Joerg Roedele6de0f82015-07-22 16:30:36 +02002525 dmar_remove_one_dev_info(domain, dev);
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002526 return NULL;
2527 }
2528
David Woodhouseb718cd32014-03-09 13:11:33 -07002529 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002530}
2531
Alex Williamson579305f2014-07-03 09:51:43 -06002532static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2533{
2534 *(u16 *)opaque = alias;
2535 return 0;
2536}
2537
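/*
 * Find the domain already associated with @dev's PCI DMA alias, or
 * allocate and initialize a new one with the requested guest address
 * width.  Returns NULL if no IOMMU covers the device or the allocation
 * fails.
 */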
Joerg Roedel76208352016-08-25 14:25:12 +02002538static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002539{
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002540 struct device_domain_info *info = NULL;
Joerg Roedel76208352016-08-25 14:25:12 +02002541 struct dmar_domain *domain = NULL;
Alex Williamson579305f2014-07-03 09:51:43 -06002542 struct intel_iommu *iommu;
Lu Baolufcc35c62018-05-04 13:08:17 +08002543 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002544 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002545 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002546
David Woodhouse146922e2014-03-09 15:44:17 -07002547 iommu = device_to_iommu(dev, &bus, &devfn);
2548 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002549 return NULL;
2550
2551 if (dev_is_pci(dev)) {
2552 struct pci_dev *pdev = to_pci_dev(dev);
2553
2554 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2555
2556 spin_lock_irqsave(&device_domain_lock, flags);
2557 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2558 PCI_BUS_NUM(dma_alias),
2559 dma_alias & 0xff);
2560 if (info) {
2561 iommu = info->iommu;
2562 domain = info->domain;
2563 }
2564 spin_unlock_irqrestore(&device_domain_lock, flags);
2565
Joerg Roedel76208352016-08-25 14:25:12 +02002566 /* DMA alias already has a domain, use it */
Alex Williamson579305f2014-07-03 09:51:43 -06002567 if (info)
Joerg Roedel76208352016-08-25 14:25:12 +02002568 goto out;
Alex Williamson579305f2014-07-03 09:51:43 -06002569 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002570
David Woodhouse146922e2014-03-09 15:44:17 -07002571 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002572 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002573 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002574 return NULL;
Joerg Roedeldc534b22015-07-22 12:44:02 +02002575 if (domain_init(domain, iommu, gaw)) {
Alex Williamson579305f2014-07-03 09:51:43 -06002576 domain_exit(domain);
2577 return NULL;
2578 }
2579
Joerg Roedel76208352016-08-25 14:25:12 +02002580out:
Alex Williamson579305f2014-07-03 09:51:43 -06002581
Joerg Roedel76208352016-08-25 14:25:12 +02002582 return domain;
2583}
2584
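/*
 * Bind @domain to @dev, registering the PCI DMA alias first when it
 * differs from the device's own request ID.  Returns the domain that
 * ends up bound to the device, which may be an already existing one
 * rather than @domain.
 */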
2585static struct dmar_domain *set_domain_for_dev(struct device *dev,
2586 struct dmar_domain *domain)
2587{
2588 struct intel_iommu *iommu;
2589 struct dmar_domain *tmp;
2590 u16 req_id, dma_alias;
2591 u8 bus, devfn;
2592
2593 iommu = device_to_iommu(dev, &bus, &devfn);
2594 if (!iommu)
2595 return NULL;
2596
2597 req_id = ((u16)bus << 8) | devfn;
2598
2599 if (dev_is_pci(dev)) {
2600 struct pci_dev *pdev = to_pci_dev(dev);
2601
2602 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2603
2604 /* register PCI DMA alias device */
2605 if (req_id != dma_alias) {
2606 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2607 dma_alias & 0xff, NULL, domain);
2608
2609 if (!tmp || tmp != domain)
2610 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002611 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002612 }
2613
Joerg Roedel5db31562015-07-22 12:40:43 +02002614 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
Joerg Roedel76208352016-08-25 14:25:12 +02002615 if (!tmp || tmp != domain)
2616 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002617
Joerg Roedel76208352016-08-25 14:25:12 +02002618 return domain;
2619}
2620
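/*
 * Return the domain of @dev, allocating and binding a new one if the
 * device does not have a domain yet.
 */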
2621static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2622{
2623 struct dmar_domain *domain, *tmp;
2624
2625 domain = find_domain(dev);
2626 if (domain)
2627 goto out;
2628
2629 domain = find_or_alloc_domain(dev, gaw);
2630 if (!domain)
2631 goto out;
2632
2633 tmp = set_domain_for_dev(dev, domain);
2634 if (!tmp || domain != tmp) {
Alex Williamson579305f2014-07-03 09:51:43 -06002635 domain_exit(domain);
2636 domain = tmp;
2637 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002638
Joerg Roedel76208352016-08-25 14:25:12 +02002639out:
2640
David Woodhouseb718cd32014-03-09 13:11:33 -07002641 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002642}
2643
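/*
 * Identity-map [start, end] into @domain: reserve the matching IOVA
 * range and install 1:1 page table entries with read/write permission.
 */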
David Woodhouseb2132032009-06-26 18:50:28 +01002644static int iommu_domain_identity_map(struct dmar_domain *domain,
2645 unsigned long long start,
2646 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002647{
David Woodhousec5395d52009-06-28 16:35:56 +01002648 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2649 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002650
David Woodhousec5395d52009-06-28 16:35:56 +01002651 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2652 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002653 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002654 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002655 }
2656
Joerg Roedelaf1089c2015-07-21 15:45:19 +02002657 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002658 /*
2659	 * The RMRR range might overlap with the physical memory range,
2660	 * so clear it first
2661 */
David Woodhousec5395d52009-06-28 16:35:56 +01002662 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002663
Peter Xu87684fd2018-05-04 10:34:53 +08002664 return __domain_mapping(domain, first_vpfn, NULL,
2665 first_vpfn, last_vpfn - first_vpfn + 1,
2666 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002667}
2668
Joerg Roedeld66ce542015-09-23 19:00:10 +02002669static int domain_prepare_identity_map(struct device *dev,
2670 struct dmar_domain *domain,
2671 unsigned long long start,
2672 unsigned long long end)
David Woodhouseb2132032009-06-26 18:50:28 +01002673{
David Woodhouse19943b02009-08-04 16:19:20 +01002674 /* For _hardware_ passthrough, don't bother. But for software
2675 passthrough, we do it anyway -- it may indicate a memory
2676	   range which is reserved in E820 and so didn't get set
2677	   up in si_domain to start with */
2678 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002679 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2680 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002681 return 0;
2682 }
2683
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002684 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2685 dev_name(dev), start, end);
2686
David Woodhouse5595b522009-12-02 09:21:55 +00002687 if (end < start) {
2688 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2689 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2690 dmi_get_system_info(DMI_BIOS_VENDOR),
2691 dmi_get_system_info(DMI_BIOS_VERSION),
2692 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002693 return -EIO;
David Woodhouse5595b522009-12-02 09:21:55 +00002694 }
2695
David Woodhouse2ff729f2009-08-26 14:25:41 +01002696 if (end >> agaw_to_width(domain->agaw)) {
2697 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2698 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2699 agaw_to_width(domain->agaw),
2700 dmi_get_system_info(DMI_BIOS_VENDOR),
2701 dmi_get_system_info(DMI_BIOS_VERSION),
2702 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002703 return -EIO;
David Woodhouse2ff729f2009-08-26 14:25:41 +01002704 }
David Woodhouse19943b02009-08-04 16:19:20 +01002705
Joerg Roedeld66ce542015-09-23 19:00:10 +02002706 return iommu_domain_identity_map(domain, start, end);
2707}
2708
2709static int iommu_prepare_identity_map(struct device *dev,
2710 unsigned long long start,
2711 unsigned long long end)
2712{
2713 struct dmar_domain *domain;
2714 int ret;
2715
2716 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2717 if (!domain)
2718 return -ENOMEM;
2719
2720 ret = domain_prepare_identity_map(dev, domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002721 if (ret)
Joerg Roedeld66ce542015-09-23 19:00:10 +02002722 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002723
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002724 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002725}
2726
2727static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002728 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002729{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002730 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002731 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002732 return iommu_prepare_identity_map(dev, rmrr->base_address,
2733 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002734}
2735
Suresh Siddhad3f13812011-08-23 17:05:25 -07002736#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002737static inline void iommu_prepare_isa(void)
2738{
2739 struct pci_dev *pdev;
2740 int ret;
2741
2742 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2743 if (!pdev)
2744 return;
2745
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002746 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002747 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002748
2749 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002750 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002751
Yijing Wang9b27e822014-05-20 20:37:52 +08002752 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002753}
2754#else
2755static inline void iommu_prepare_isa(void)
2756{
2757 return;
2758}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002759#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002760
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002761static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002762
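/*
 * Allocate and initialize the static identity (si) domain.  For
 * software passthrough (hw == 0) all online memory is identity-mapped;
 * for hardware passthrough no page table entries are needed.
 */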
Matt Kraai071e1372009-08-23 22:30:22 -07002763static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002764{
David Woodhousec7ab48d2009-06-26 19:10:36 +01002765 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002766
Jiang Liuab8dfe22014-07-11 14:19:27 +08002767 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002768 if (!si_domain)
2769 return -EFAULT;
2770
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002771 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2772 domain_exit(si_domain);
2773 return -EFAULT;
2774 }
2775
Joerg Roedel0dc79712015-07-21 15:40:06 +02002776 pr_debug("Identity mapping domain allocated\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002777
David Woodhouse19943b02009-08-04 16:19:20 +01002778 if (hw)
2779 return 0;
2780
David Woodhousec7ab48d2009-06-26 19:10:36 +01002781 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002782 unsigned long start_pfn, end_pfn;
2783 int i;
2784
2785 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2786 ret = iommu_domain_identity_map(si_domain,
2787 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2788 if (ret)
2789 return ret;
2790 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002791 }
2792
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002793 return 0;
2794}
2795
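/* Return non-zero if @dev is currently attached to the si (identity) domain. */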
David Woodhouse9b226622014-03-09 14:03:28 -07002796static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002797{
2798 struct device_domain_info *info;
2799
2800 if (likely(!iommu_identity_mapping))
2801 return 0;
2802
David Woodhouse9b226622014-03-09 14:03:28 -07002803 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002804 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2805 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002806
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002807 return 0;
2808}
2809
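/*
 * Attach @dev to @domain.  Returns -ENODEV if no IOMMU covers the
 * device and -EBUSY if the device already belongs to another domain.
 */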
Joerg Roedel28ccce02015-07-21 14:45:31 +02002810static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002811{
David Woodhouse0ac72662014-03-09 13:19:22 -07002812 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002813 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002814 u8 bus, devfn;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002815
David Woodhouse5913c9b2014-03-09 16:27:31 -07002816 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002817 if (!iommu)
2818 return -ENODEV;
2819
Joerg Roedel5db31562015-07-22 12:40:43 +02002820 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002821 if (ndomain != domain)
2822 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002823
2824 return 0;
2825}
2826
David Woodhouse0b9d9752014-03-09 15:48:15 -07002827static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002828{
2829 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002830 struct device *tmp;
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002831 int i;
2832
Jiang Liu0e242612014-02-19 14:07:34 +08002833 rcu_read_lock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002834 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002835 /*
2836 * Return TRUE if this RMRR contains the device that
2837 * is passed in.
2838 */
2839 for_each_active_dev_scope(rmrr->devices,
2840 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002841 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002842 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002843 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002844 }
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002845 }
Jiang Liu0e242612014-02-19 14:07:34 +08002846 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002847 return false;
2848}
2849
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002850/*
2851 * There are a couple cases where we need to restrict the functionality of
2852 * devices associated with RMRRs. The first is when evaluating a device for
2853 * identity mapping because problems exist when devices are moved in and out
2854 * of domains and their respective RMRR information is lost. This means that
2855 * a device with associated RMRRs will never be in a "passthrough" domain.
2856 * The second is use of the device through the IOMMU API. This interface
2857 * expects to have full control of the IOVA space for the device. We cannot
2858 * satisfy both the requirement that RMRR access is maintained and have an
2859 * unencumbered IOVA space. We also have no ability to quiesce the device's
2860 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2861 * We therefore prevent devices associated with an RMRR from participating in
2862 * the IOMMU API, which eliminates them from device assignment.
2863 *
2864 * In both cases we assume that PCI USB devices with RMRRs have them largely
2865 * for historical reasons and that the RMRR space is not actively used post
2866 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002867 *
2868 * The same exception is made for graphics devices, with the requirement that
2869 * any use of the RMRR regions will be torn down before assigning the device
2870 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002871 */
2872static bool device_is_rmrr_locked(struct device *dev)
2873{
2874 if (!device_has_rmrr(dev))
2875 return false;
2876
2877 if (dev_is_pci(dev)) {
2878 struct pci_dev *pdev = to_pci_dev(dev);
2879
David Woodhouse18436af2015-03-25 15:05:47 +00002880 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002881 return false;
2882 }
2883
2884 return true;
2885}
2886
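/*
 * Decide whether @dev should be placed in the static identity domain,
 * based on the identity-mapping policy (IDENTMAP_ALL/GFX/AZALIA), RMRR
 * restrictions, PCI topology and, when called after boot (!startup),
 * the device's DMA mask.
 */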
David Woodhouse3bdb2592014-03-09 16:03:08 -07002887static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002888{
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002889
David Woodhouse3bdb2592014-03-09 16:03:08 -07002890 if (dev_is_pci(dev)) {
2891 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002892
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002893 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002894 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002895
David Woodhouse3bdb2592014-03-09 16:03:08 -07002896 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2897 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002898
David Woodhouse3bdb2592014-03-09 16:03:08 -07002899 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2900 return 1;
2901
2902 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2903 return 0;
2904
2905 /*
2906 * We want to start off with all devices in the 1:1 domain, and
2907 * take them out later if we find they can't access all of memory.
2908 *
2909 * However, we can't do this for PCI devices behind bridges,
2910 * because all PCI devices behind the same bridge will end up
2911 * with the same source-id on their transactions.
2912 *
2913 * Practically speaking, we can't change things around for these
2914 * devices at run-time, because we can't be sure there'll be no
2915 * DMA transactions in flight for any of their siblings.
2916 *
2917 * So PCI devices (unless they're on the root bus) as well as
2918 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2919 * the 1:1 domain, just in _case_ one of their siblings turns out
2920 * not to be able to map all of memory.
2921 */
2922 if (!pci_is_pcie(pdev)) {
2923 if (!pci_is_root_bus(pdev->bus))
2924 return 0;
2925 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2926 return 0;
2927 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2928 return 0;
2929 } else {
2930 if (device_has_rmrr(dev))
2931 return 0;
2932 }
David Woodhouse6941af22009-07-04 18:24:27 +01002933
David Woodhouse3dfc8132009-07-04 19:11:08 +01002934 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002935 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002936 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002937 * take them out of the 1:1 domain later.
2938 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002939 if (!startup) {
2940 /*
2941 * If the device's dma_mask is less than the system's memory
2942 * size then this is not a candidate for identity mapping.
2943 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002944 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002945
David Woodhouse3bdb2592014-03-09 16:03:08 -07002946 if (dev->coherent_dma_mask &&
2947 dev->coherent_dma_mask < dma_mask)
2948 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002949
David Woodhouse3bdb2592014-03-09 16:03:08 -07002950 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002951 }
David Woodhouse6941af22009-07-04 18:24:27 +01002952
2953 return 1;
2954}
2955
David Woodhousecf04eee2014-03-21 16:49:04 +00002956static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2957{
2958 int ret;
2959
2960 if (!iommu_should_identity_map(dev, 1))
2961 return 0;
2962
Joerg Roedel28ccce02015-07-21 14:45:31 +02002963 ret = domain_add_dev_info(si_domain, dev);
David Woodhousecf04eee2014-03-21 16:49:04 +00002964 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002965 pr_info("%s identity mapping for device %s\n",
2966 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002967 else if (ret == -ENODEV)
2968 /* device not associated with an iommu */
2969 ret = 0;
2970
2971 return ret;
2972}
2973
2974
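/*
 * Walk all PCI devices and the ACPI devices in each DRHD's scope and
 * pre-populate the static identity domain for those that qualify.
 */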
Matt Kraai071e1372009-08-23 22:30:22 -07002975static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002976{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002977 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002978 struct dmar_drhd_unit *drhd;
2979 struct intel_iommu *iommu;
2980 struct device *dev;
2981 int i;
2982 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002983
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002984 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002985 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2986 if (ret)
2987 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002988 }
2989
David Woodhousecf04eee2014-03-21 16:49:04 +00002990 for_each_active_iommu(iommu, drhd)
2991 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2992 struct acpi_device_physical_node *pn;
2993 struct acpi_device *adev;
2994
2995 if (dev->bus != &acpi_bus_type)
2996 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002997
David Woodhousecf04eee2014-03-21 16:49:04 +00002998		adev = to_acpi_device(dev);
2999 mutex_lock(&adev->physical_node_lock);
3000 list_for_each_entry(pn, &adev->physical_node_list, node) {
3001 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
3002 if (ret)
3003 break;
3004 }
3005 mutex_unlock(&adev->physical_node_lock);
3006 if (ret)
3007 return ret;
3008 }
3009
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003010 return 0;
3011}
3012
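/*
 * Bring the IOMMU's invalidation interface into a known state: unless
 * queued invalidation was already set up by us, clear stale faults and
 * disable any QI left enabled before OS handover; then enable QI,
 * falling back to register based invalidation if that fails.
 */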
Jiang Liuffebeb42014-11-09 22:48:02 +08003013static void intel_iommu_init_qi(struct intel_iommu *iommu)
3014{
3015 /*
3016	 * Start from a sane iommu hardware state.
3017	 * If queued invalidation was already initialized by us
3018	 * (for example, while enabling interrupt-remapping) then
3019	 * things are already rolling from a sane state.
3020 */
3021 if (!iommu->qi) {
3022 /*
3023 * Clear any previous faults.
3024 */
3025 dmar_fault(-1, iommu);
3026 /*
3027 * Disable queued invalidation if supported and already enabled
3028 * before OS handover.
3029 */
3030 dmar_disable_qi(iommu);
3031 }
3032
3033 if (dmar_enable_qi(iommu)) {
3034 /*
3035 * Queued Invalidate not enabled, use Register Based Invalidate
3036 */
3037 iommu->flush.flush_context = __iommu_flush_context;
3038 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003039 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003040 iommu->name);
3041 } else {
3042 iommu->flush.flush_context = qi_flush_context;
3043 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003044 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08003045 }
3046}
3047
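/*
 * Copy one bus's context table(s) from the previous kernel in the
 * kdump case: memremap() the old entries, copy them into freshly
 * allocated pages, mark them as copied with PASIDs disabled, and
 * reserve the domain IDs they use in @iommu->domain_ids.
 */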
Joerg Roedel091d42e2015-06-12 11:56:10 +02003048static int copy_context_table(struct intel_iommu *iommu,
Dan Williamsdfddb962015-10-09 18:16:46 -04003049 struct root_entry *old_re,
Joerg Roedel091d42e2015-06-12 11:56:10 +02003050 struct context_entry **tbl,
3051 int bus, bool ext)
3052{
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003053 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003054 struct context_entry *new_ce = NULL, ce;
Dan Williamsdfddb962015-10-09 18:16:46 -04003055 struct context_entry *old_ce = NULL;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003056 struct root_entry re;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003057 phys_addr_t old_ce_phys;
3058
3059 tbl_idx = ext ? bus * 2 : bus;
Dan Williamsdfddb962015-10-09 18:16:46 -04003060 memcpy(&re, old_re, sizeof(re));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003061
3062 for (devfn = 0; devfn < 256; devfn++) {
3063 /* First calculate the correct index */
3064 idx = (ext ? devfn * 2 : devfn) % 256;
3065
3066 if (idx == 0) {
3067 /* First save what we may have and clean up */
3068 if (new_ce) {
3069 tbl[tbl_idx] = new_ce;
3070 __iommu_flush_cache(iommu, new_ce,
3071 VTD_PAGE_SIZE);
3072 pos = 1;
3073 }
3074
3075 if (old_ce)
3076 iounmap(old_ce);
3077
3078 ret = 0;
3079 if (devfn < 0x80)
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003080 old_ce_phys = root_entry_lctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003081 else
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003082 old_ce_phys = root_entry_uctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003083
3084 if (!old_ce_phys) {
3085 if (ext && devfn == 0) {
3086 /* No LCTP, try UCTP */
3087 devfn = 0x7f;
3088 continue;
3089 } else {
3090 goto out;
3091 }
3092 }
3093
3094 ret = -ENOMEM;
Dan Williamsdfddb962015-10-09 18:16:46 -04003095 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3096 MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003097 if (!old_ce)
3098 goto out;
3099
3100 new_ce = alloc_pgtable_page(iommu->node);
3101 if (!new_ce)
3102 goto out_unmap;
3103
3104 ret = 0;
3105 }
3106
3107 /* Now copy the context entry */
Dan Williamsdfddb962015-10-09 18:16:46 -04003108 memcpy(&ce, old_ce + idx, sizeof(ce));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003109
Joerg Roedelcf484d02015-06-12 12:21:46 +02003110 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02003111 continue;
3112
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003113 did = context_domain_id(&ce);
3114 if (did >= 0 && did < cap_ndoms(iommu->cap))
3115 set_bit(did, iommu->domain_ids);
3116
Joerg Roedelcf484d02015-06-12 12:21:46 +02003117 /*
3118 * We need a marker for copied context entries. This
3119 * marker needs to work for the old format as well as
3120 * for extended context entries.
3121 *
3122 * Bit 67 of the context entry is used. In the old
3123 * format this bit is available to software, in the
3124 * extended format it is the PGE bit, but PGE is ignored
3125 * by HW if PASIDs are disabled (and thus still
3126 * available).
3127 *
3128 * So disable PASIDs first and then mark the entry
3129 * copied. This means that we don't copy PASID
3130 * translations from the old kernel, but this is fine as
3131 * faults there are not fatal.
3132 */
3133 context_clear_pasid_enable(&ce);
3134 context_set_copied(&ce);
3135
Joerg Roedel091d42e2015-06-12 11:56:10 +02003136 new_ce[idx] = ce;
3137 }
3138
3139 tbl[tbl_idx + pos] = new_ce;
3140
3141 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3142
3143out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003144 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003145
3146out:
3147 return ret;
3148}
3149
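/*
 * In the kdump case, copy the context tables that the previous kernel
 * left programmed via DMAR_RTADDR_REG into this kernel's root_entry
 * table.  Bails out if the root table format (extended vs. legacy)
 * would have to change, since that requires disabling translation.
 */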
3150static int copy_translation_tables(struct intel_iommu *iommu)
3151{
3152 struct context_entry **ctxt_tbls;
Dan Williamsdfddb962015-10-09 18:16:46 -04003153 struct root_entry *old_rt;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003154 phys_addr_t old_rt_phys;
3155 int ctxt_table_entries;
3156 unsigned long flags;
3157 u64 rtaddr_reg;
3158 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02003159 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003160
3161 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3162 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02003163 new_ext = !!ecap_ecs(iommu->ecap);
3164
3165 /*
3166 * The RTT bit can only be changed when translation is disabled,
3167	 * but disabling translation would open a window for data
3168 * corruption. So bail out and don't copy anything if we would
3169 * have to change the bit.
3170 */
3171 if (new_ext != ext)
3172 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003173
3174 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3175 if (!old_rt_phys)
3176 return -EINVAL;
3177
Dan Williamsdfddb962015-10-09 18:16:46 -04003178 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003179 if (!old_rt)
3180 return -ENOMEM;
3181
3182 /* This is too big for the stack - allocate it from slab */
3183 ctxt_table_entries = ext ? 512 : 256;
3184 ret = -ENOMEM;
Kees Cook6396bb22018-06-12 14:03:40 -07003185 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003186 if (!ctxt_tbls)
3187 goto out_unmap;
3188
3189 for (bus = 0; bus < 256; bus++) {
3190 ret = copy_context_table(iommu, &old_rt[bus],
3191 ctxt_tbls, bus, ext);
3192 if (ret) {
3193 pr_err("%s: Failed to copy context table for bus %d\n",
3194 iommu->name, bus);
3195 continue;
3196 }
3197 }
3198
3199 spin_lock_irqsave(&iommu->lock, flags);
3200
3201 /* Context tables are copied, now write them to the root_entry table */
3202 for (bus = 0; bus < 256; bus++) {
3203 int idx = ext ? bus * 2 : bus;
3204 u64 val;
3205
3206 if (ctxt_tbls[idx]) {
3207 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3208 iommu->root_entry[bus].lo = val;
3209 }
3210
3211 if (!ext || !ctxt_tbls[idx + 1])
3212 continue;
3213
3214 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3215 iommu->root_entry[bus].hi = val;
3216 }
3217
3218 spin_unlock_irqrestore(&iommu->lock, flags);
3219
3220 kfree(ctxt_tbls);
3221
3222 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3223
3224 ret = 0;
3225
3226out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003227 memunmap(old_rt);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003228
3229 return ret;
3230}
3231
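/*
 * One-time DMAR initialization: allocate per-IOMMU state and root
 * entries, copy translation tables in the kdump case, set up identity
 * and RMRR mappings, then enable fault reporting, invalidation and
 * translation on every active IOMMU.
 */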
Joseph Cihulab7792602011-05-03 00:08:37 -07003232static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003233{
3234 struct dmar_drhd_unit *drhd;
3235 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003236 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003237 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003238 struct intel_iommu *iommu;
Joerg Roedel13cf0172017-08-11 11:40:10 +02003239 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003240
3241 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003242 * for each drhd
3243 * allocate root
3244 * initialize and program root entry to not present
3245 * endfor
3246 */
3247 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003248 /*
3249		 * lock not needed as this is only incremented in the
3250		 * single-threaded kernel __init code path; all other
3251		 * accesses are read-only
3252 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003253 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003254 g_num_of_iommus++;
3255 continue;
3256 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003257 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003258 }
3259
Jiang Liuffebeb42014-11-09 22:48:02 +08003260 /* Preallocate enough resources for IOMMU hot-addition */
3261 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3262 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3263
Weidong Hand9630fe2008-12-08 11:06:32 +08003264 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3265 GFP_KERNEL);
3266 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003267 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003268 ret = -ENOMEM;
3269 goto error;
3270 }
3271
Jiang Liu7c919772014-01-06 14:18:18 +08003272 for_each_active_iommu(iommu, drhd) {
Lu Baolu56283172018-07-14 15:46:54 +08003273 /*
3274 * Find the max pasid size of all IOMMU's in the system.
3275 * We need to ensure the system pasid table is no bigger
3276 * than the smallest supported.
3277 */
3278 if (pasid_enabled(iommu)) {
3279 u32 temp = 2 << ecap_pss(iommu->ecap);
3280
3281 intel_pasid_max_id = min_t(u32, temp,
3282 intel_pasid_max_id);
3283 }
3284
Weidong Hand9630fe2008-12-08 11:06:32 +08003285 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003286
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003287 intel_iommu_init_qi(iommu);
3288
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003289 ret = iommu_init_domains(iommu);
3290 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003291 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003292
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003293 init_translation_status(iommu);
3294
Joerg Roedel091d42e2015-06-12 11:56:10 +02003295 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3296 iommu_disable_translation(iommu);
3297 clear_translation_pre_enabled(iommu);
3298 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3299 iommu->name);
3300 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003301
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302 /*
3303 * TBD:
3304 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003305		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003306 */
3307 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003308 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003309 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003310
Joerg Roedel091d42e2015-06-12 11:56:10 +02003311 if (translation_pre_enabled(iommu)) {
3312 pr_info("Translation already enabled - trying to copy translation structures\n");
3313
3314 ret = copy_translation_tables(iommu);
3315 if (ret) {
3316 /*
3317 * We found the IOMMU with translation
3318 * enabled - but failed to copy over the
3319 * old root-entry table. Try to proceed
3320 * by disabling translation now and
3321 * allocating a clean root-entry table.
3322 * This might cause DMAR faults, but
3323 * probably the dump will still succeed.
3324 */
3325 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3326 iommu->name);
3327 iommu_disable_translation(iommu);
3328 clear_translation_pre_enabled(iommu);
3329 } else {
3330 pr_info("Copied translation tables from previous kernel for %s\n",
3331 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003332 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003333 }
3334 }
3335
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003336 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003337 hw_pass_through = 0;
David Woodhouse8a94ade2015-03-24 14:54:56 +00003338#ifdef CONFIG_INTEL_IOMMU_SVM
3339 if (pasid_enabled(iommu))
Lu Baolud9737952018-07-14 15:47:02 +08003340 intel_svm_init(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00003341#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003342 }
3343
Joerg Roedela4c34ff2016-06-17 11:29:48 +02003344 /*
3345 * Now that qi is enabled on all iommus, set the root entry and flush
3346 * caches. This is required on some Intel X58 chipsets, otherwise the
3347 * flush_context function will loop forever and the boot hangs.
3348 */
3349 for_each_active_iommu(iommu, drhd) {
3350 iommu_flush_write_buffer(iommu);
3351 iommu_set_root_entry(iommu);
3352 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3353 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3354 }
3355
David Woodhouse19943b02009-08-04 16:19:20 +01003356 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003357 iommu_identity_mapping |= IDENTMAP_ALL;
3358
Suresh Siddhad3f13812011-08-23 17:05:25 -07003359#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003360 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003361#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003362
Ashok Raj21e722c2017-01-30 09:39:53 -08003363 check_tylersburg_isoch();
3364
Joerg Roedel86080cc2015-06-12 12:27:16 +02003365 if (iommu_identity_mapping) {
3366 ret = si_domain_init(hw_pass_through);
3367 if (ret)
3368 goto free_iommu;
3369 }
3370
David Woodhousee0fc7e02009-09-30 09:12:17 -07003371
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003372 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003373 * If we copied translations from a previous kernel in the kdump
3374 * case, we can not assign the devices to domains now, as that
3375 * would eliminate the old mappings. So skip this part and defer
3376 * the assignment to device driver initialization time.
3377 */
3378 if (copied_tables)
3379 goto domains_done;
3380
3381 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003382 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003383 * identity mappings for rmrr, gfx, and isa and may fall back to static
3384 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003385 */
David Woodhouse19943b02009-08-04 16:19:20 +01003386 if (iommu_identity_mapping) {
3387 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3388 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003389 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003390 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003391 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003392 }
David Woodhouse19943b02009-08-04 16:19:20 +01003393 /*
3394 * For each rmrr
3395 * for each dev attached to rmrr
3396 * do
3397 * locate drhd for dev, alloc domain for dev
3398 * allocate free domain
3399 * allocate page table entries for rmrr
3400 * if context not allocated for bus
3401 * allocate and init context
3402 * set present in root table for this bus
3403 * init context with domain, translation etc
3404 * endfor
3405 * endfor
3406 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003407 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003408 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003409 /* some BIOS lists non-exist devices in DMAR table. */
3410 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003411 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003412 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003413 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003414 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003415 }
3416 }
3417
3418 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003419
Joerg Roedela87f4912015-06-12 12:32:54 +02003420domains_done:
3421
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003422 /*
3423 * for each drhd
3424 * enable fault log
3425 * global invalidate context cache
3426 * global invalidate iotlb
3427 * enable translation
3428 */
Jiang Liu7c919772014-01-06 14:18:18 +08003429 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003430 if (drhd->ignored) {
3431 /*
3432 * we always have to disable PMRs or DMA may fail on
3433 * this device
3434 */
3435 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003436 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003437 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003438 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003439
3440 iommu_flush_write_buffer(iommu);
3441
David Woodhousea222a7f2015-10-07 23:35:18 +01003442#ifdef CONFIG_INTEL_IOMMU_SVM
3443 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3444 ret = intel_svm_enable_prq(iommu);
3445 if (ret)
3446 goto free_iommu;
3447 }
3448#endif
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003449 ret = dmar_set_interrupt(iommu);
3450 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003451 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003452
Joerg Roedel8939ddf2015-06-12 14:40:01 +02003453 if (!translation_pre_enabled(iommu))
3454 iommu_enable_translation(iommu);
3455
David Woodhouseb94996c2009-09-19 15:28:12 -07003456 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003457 }
3458
3459 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003460
3461free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003462 for_each_active_iommu(iommu, drhd) {
3463 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003464 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003465 }
Joerg Roedel13cf0172017-08-11 11:40:10 +02003466
Weidong Hand9630fe2008-12-08 11:06:32 +08003467 kfree(g_iommus);
Joerg Roedel13cf0172017-08-11 11:40:10 +02003468
Jiang Liu989d51f2014-02-19 14:07:21 +08003469error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003470 return ret;
3471}
3472
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003473/* This takes a number of _MM_ pages, not VTD pages */
Omer Peleg2aac6302016-04-20 11:33:57 +03003474static unsigned long intel_alloc_iova(struct device *dev,
David Woodhouse875764d2009-06-28 21:20:51 +01003475 struct dmar_domain *domain,
3476 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003477{
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003478 unsigned long iova_pfn = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003479
David Woodhouse875764d2009-06-28 21:20:51 +01003480 /* Restrict dma_mask to the width that the iommu can handle */
3481 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
Robin Murphy8f6429c2015-07-16 19:40:12 +01003482 /* Ensure we reserve the whole size-aligned region */
3483 nrpages = __roundup_pow_of_two(nrpages);
David Woodhouse875764d2009-06-28 21:20:51 +01003484
3485 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003486 /*
3487 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003488 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003489 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003490 */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003491 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003492 IOVA_PFN(DMA_BIT_MASK(32)), false);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003493 if (iova_pfn)
3494 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003495 }
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003496 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3497 IOVA_PFN(dma_mask), true);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003498 if (unlikely(!iova_pfn)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003499		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003500 nrpages, dev_name(dev));
Omer Peleg2aac6302016-04-20 11:33:57 +03003501 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003502 }
3503
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003504 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003505}
3506
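/*
 * Return the DMA API domain for @dev, creating it (including any RMRR
 * identity mappings for the device) on first use.
 */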
Lu Baolu9ddbfb42018-07-14 15:46:57 +08003507struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003508{
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003509 struct dmar_domain *domain, *tmp;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003510 struct dmar_rmrr_unit *rmrr;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003511 struct device *i_dev;
3512 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003513
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003514 domain = find_domain(dev);
3515 if (domain)
3516 goto out;
3517
3518 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3519 if (!domain)
3520 goto out;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003521
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003522 /* We have a new domain - setup possible RMRRs for the device */
3523 rcu_read_lock();
3524 for_each_rmrr_units(rmrr) {
3525 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3526 i, i_dev) {
3527 if (i_dev != dev)
3528 continue;
3529
3530 ret = domain_prepare_identity_map(dev, domain,
3531 rmrr->base_address,
3532 rmrr->end_address);
3533 if (ret)
3534 dev_err(dev, "Mapping reserved region failed\n");
3535 }
3536 }
3537 rcu_read_unlock();
3538
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003539 tmp = set_domain_for_dev(dev, domain);
3540 if (!tmp || domain != tmp) {
3541 domain_exit(domain);
3542 domain = tmp;
3543 }
3544
3545out:
3546
3547 if (!domain)
3548 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3549
3550
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003551 return domain;
3552}
3553
David Woodhouseecb509e2014-03-09 16:29:55 -07003554/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01003555static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003556{
3557 int found;
3558
David Woodhouse3d891942014-03-06 15:59:26 +00003559 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003560 return 1;
3561
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003562 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003563 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003564
David Woodhouse9b226622014-03-09 14:03:28 -07003565 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003566 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003567 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003568 return 1;
3569 else {
3570 /*
3571			 * The 32-bit DMA device is removed from si_domain and
3572			 * falls back to non-identity mapping.
3573 */
Joerg Roedele6de0f82015-07-22 16:30:36 +02003574 dmar_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003575 pr_info("32bit %s uses non-identity mapping\n",
3576 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003577 return 0;
3578 }
3579 } else {
3580 /*
3581			 * When a 64-bit DMA device is detached from a VM, the device
3582			 * is put into si_domain for identity mapping.
3583 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003584 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003585 int ret;
Joerg Roedel28ccce02015-07-21 14:45:31 +02003586 ret = domain_add_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003587 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003588 pr_info("64bit %s uses identity mapping\n",
3589 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003590 return 1;
3591 }
3592 }
3593 }
3594
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003595 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003596}
3597
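/*
 * Map @size bytes at physical address @paddr for DMA and return the
 * bus address, allocating IOVA space from the device's domain.  For
 * identity-mapped devices the physical address is returned unchanged.
 * Called via the DMA API, e.g. from intel_map_page().
 */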
David Woodhouse5040a912014-03-09 16:14:00 -07003598static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003599 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003600{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003601 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003602 phys_addr_t start_paddr;
Omer Peleg2aac6302016-04-20 11:33:57 +03003603 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003604 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003605 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003606 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003607 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003608
3609 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003610
David Woodhouse5040a912014-03-09 16:14:00 -07003611 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003612 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003613
David Woodhouse5040a912014-03-09 16:14:00 -07003614 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003615 if (!domain)
3616 return 0;
3617
Weidong Han8c11e792008-12-08 15:29:22 +08003618 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003619 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003620
Omer Peleg2aac6302016-04-20 11:33:57 +03003621 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3622 if (!iova_pfn)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003623 goto error;
3624
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003625 /*
3626 * Check if DMAR supports zero-length reads on write only
3627 * mappings..
3628 */
3629	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003630 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003631 prot |= DMA_PTE_READ;
3632 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3633 prot |= DMA_PTE_WRITE;
3634 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003635	 * paddr to (paddr + size) might span a partial page; we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003636	 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003637	 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003638	 * is not a big problem
3639 */
Omer Peleg2aac6302016-04-20 11:33:57 +03003640 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003641 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003642 if (ret)
3643 goto error;
3644
Omer Peleg2aac6302016-04-20 11:33:57 +03003645 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
David Woodhouse03d6a242009-06-28 15:33:46 +01003646 start_paddr += paddr & ~PAGE_MASK;
3647 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003648
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003649error:
Omer Peleg2aac6302016-04-20 11:33:57 +03003650 if (iova_pfn)
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003651 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003652 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003653 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003654 return 0;
3655}
3656
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003657static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3658 unsigned long offset, size_t size,
3659 enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003660 unsigned long attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003661{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003662 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003663 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003664}
3665
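/*
 * Tear down the mapping at @dev_addr: clear the page table range and
 * either flush the IOTLB immediately (intel_iommu_strict) or defer the
 * IOVA release to the flush queue.
 */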
Omer Peleg769530e2016-04-20 11:33:25 +03003666static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003667{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003668 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003669 unsigned long start_pfn, last_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003670 unsigned long nrpages;
Omer Peleg2aac6302016-04-20 11:33:57 +03003671 unsigned long iova_pfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003672 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003673 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003674
David Woodhouse73676832009-07-04 14:08:36 +01003675 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003676 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003677
David Woodhouse1525a292014-03-06 16:19:30 +00003678 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003679 BUG_ON(!domain);
3680
Weidong Han8c11e792008-12-08 15:29:22 +08003681 iommu = domain_get_iommu(domain);
3682
Omer Peleg2aac6302016-04-20 11:33:57 +03003683 iova_pfn = IOVA_PFN(dev_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003684
Omer Peleg769530e2016-04-20 11:33:25 +03003685 nrpages = aligned_nrpages(dev_addr, size);
Omer Peleg2aac6302016-04-20 11:33:57 +03003686 start_pfn = mm_to_dma_pfn(iova_pfn);
Omer Peleg769530e2016-04-20 11:33:25 +03003687 last_pfn = start_pfn + nrpages - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003688
David Woodhoused794dc92009-06-28 00:27:49 +01003689 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003690 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003691
David Woodhouseea8ea462014-03-05 17:09:32 +00003692 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003693
mark gross5e0d2a62008-03-04 15:22:08 -08003694 if (intel_iommu_strict) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003695 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003696 nrpages, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003697 /* free iova */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003698 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
David Woodhouseea8ea462014-03-05 17:09:32 +00003699 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003700 } else {
Joerg Roedel13cf0172017-08-11 11:40:10 +02003701 queue_iova(&domain->iovad, iova_pfn, nrpages,
3702 (unsigned long)freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003703 /*
3704		 * queue up the release of the unmap to save the roughly 1/6th
3705		 * of the cpu time used up by the iotlb flush operation...
3706 */
mark gross5e0d2a62008-03-04 15:22:08 -08003707 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003708}
3709
Jiang Liud41a4ad2014-07-11 14:19:34 +08003710static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3711 size_t size, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003712 unsigned long attrs)
Jiang Liud41a4ad2014-07-11 14:19:34 +08003713{
Omer Peleg769530e2016-04-20 11:33:25 +03003714 intel_unmap(dev, dev_addr, size);
Jiang Liud41a4ad2014-07-11 14:19:34 +08003715}
3716
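/*
 * Allocate a coherent buffer through dma_direct and, unless the device
 * is identity-mapped, replace the returned handle with an IOMMU-mapped
 * bus address.
 */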
David Woodhouse5040a912014-03-09 16:14:00 -07003717static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003718 dma_addr_t *dma_handle, gfp_t flags,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003719 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003720{
Christoph Hellwigd657c5c2018-03-19 11:38:20 +01003721 void *vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003722
Christoph Hellwigd657c5c2018-03-19 11:38:20 +01003723 vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
3724 if (iommu_no_mapping(dev) || !vaddr)
3725 return vaddr;
Alex Williamsone8bb9102009-11-04 15:59:34 -07003726
Christoph Hellwigd657c5c2018-03-19 11:38:20 +01003727 *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
3728 PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
3729 dev->coherent_dma_mask);
3730 if (!*dma_handle)
3731 goto out_free_pages;
3732 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003733
Christoph Hellwigd657c5c2018-03-19 11:38:20 +01003734out_free_pages:
3735 dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003736 return NULL;
3737}
3738
David Woodhouse5040a912014-03-09 16:14:00 -07003739static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003740 dma_addr_t dma_handle, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003741{
Christoph Hellwigd657c5c2018-03-19 11:38:20 +01003742 if (!iommu_no_mapping(dev))
3743 intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
3744 dma_direct_free(dev, size, vaddr, dma_handle, attrs);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003745}
3746
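/*
 * Illustrative sketch, not part of this driver: a consumer of the coherent
 * allocator above. dma_alloc_coherent()/dma_free_coherent() dispatch to
 * intel_alloc_coherent()/intel_free_coherent() through the DMA API. The
 * device and buffer size are hypothetical.
 */
static int example_coherent_buffer(struct device *dev)
{
	dma_addr_t dma;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand 'dma' to the device, access the buffer via 'cpu_addr' ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma);
	return 0;
}
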
David Woodhouse5040a912014-03-09 16:14:00 -07003747static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003748 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003749 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003750{
Omer Peleg769530e2016-04-20 11:33:25 +03003751 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3752 unsigned long nrpages = 0;
3753 struct scatterlist *sg;
3754 int i;
3755
3756 for_each_sg(sglist, sg, nelems, i) {
3757 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3758 }
3759
3760 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003761}
3762
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003763static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003764 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003765{
3766 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003767 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003768
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003769 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003770 BUG_ON(!sg_page(sg));
Robin Murphy29a90b72017-09-28 15:14:01 +01003771 sg->dma_address = sg_phys(sg);
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003772 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003773 }
3774 return nelems;
3775}
3776
David Woodhouse5040a912014-03-09 16:14:00 -07003777static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003778 enum dma_data_direction dir, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003779{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003780 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003781 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003782 size_t size = 0;
3783 int prot = 0;
Omer Peleg2aac6302016-04-20 11:33:57 +03003784 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003785 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003786 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003787 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003788 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003789
3790 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003791 if (iommu_no_mapping(dev))
3792 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003793
David Woodhouse5040a912014-03-09 16:14:00 -07003794 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003795 if (!domain)
3796 return 0;
3797
Weidong Han8c11e792008-12-08 15:29:22 +08003798 iommu = domain_get_iommu(domain);
3799
David Woodhouseb536d242009-06-28 14:49:31 +01003800 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003801 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003802
Omer Peleg2aac6302016-04-20 11:33:57 +03003803 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
David Woodhouse5040a912014-03-09 16:14:00 -07003804 *dev->dma_mask);
Omer Peleg2aac6302016-04-20 11:33:57 +03003805 if (!iova_pfn) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003806 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003807 return 0;
3808 }
3809
3810 /*
 3811	 * Check if DMAR supports zero-length reads on write-only
 3812	 * mappings.
3813 */
 3814	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003815 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003816 prot |= DMA_PTE_READ;
3817 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3818 prot |= DMA_PTE_WRITE;
3819
Omer Peleg2aac6302016-04-20 11:33:57 +03003820 start_vpfn = mm_to_dma_pfn(iova_pfn);
David Woodhousee1605492009-06-29 11:17:38 +01003821
Fenghua Yuf5329592009-08-04 15:09:37 -07003822 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003823 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003824 dma_pte_free_pagetable(domain, start_vpfn,
David Dillowbc24c572017-06-28 19:42:23 -07003825 start_vpfn + size - 1,
3826 agaw_to_level(domain->agaw) + 1);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003827 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
David Woodhousee1605492009-06-29 11:17:38 +01003828 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003829 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003830
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003831 return nelems;
3832}
3833
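/*
 * Illustrative sketch, not part of this driver: scatter-gather mapping as a
 * driver would use it. dma_map_sg() dispatches to intel_map_sg() above and
 * returns the number of mapped segments (0 on failure); dma_unmap_sg() must
 * be called with the original nents. The scatterlist is assumed to have been
 * initialised by the caller.
 */
static int example_map_scatterlist(struct device *dev,
				   struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* program the device with each segment's DMA address and length */
	for_each_sg(sgl, sg, mapped, i)
		pr_debug("segment %d: len %u\n", i, sg_dma_len(sg));

	/* ... device DMA ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
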
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003834static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3835{
3836 return !dma_addr;
3837}
3838
Arvind Yadav01e19322017-06-28 16:39:32 +05303839const struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003840 .alloc = intel_alloc_coherent,
3841 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003842 .map_sg = intel_map_sg,
3843 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003844 .map_page = intel_map_page,
3845 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003846 .mapping_error = intel_mapping_error,
Christoph Hellwig5860acc2017-05-22 11:38:27 +02003847#ifdef CONFIG_X86
Christoph Hellwigfec777c2018-03-19 11:38:15 +01003848 .dma_supported = dma_direct_supported,
Christoph Hellwig5860acc2017-05-22 11:38:27 +02003849#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003850};
3851
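/*
 * How the ops above are reached: intel_iommu_init() below sets the global
 * dma_ops pointer to &intel_dma_ops, and the DMA API resolves a device's
 * operations through get_dma_ops(). A minimal sketch (the exact fallback
 * order between per-device and global ops is arch-specific):
 */
static bool example_uses_intel_dma_ops(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	return ops == &intel_dma_ops;
}
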
3852static inline int iommu_domain_cache_init(void)
3853{
3854 int ret = 0;
3855
3856 iommu_domain_cache = kmem_cache_create("iommu_domain",
3857 sizeof(struct dmar_domain),
3858 0,
 3859					 SLAB_HWCACHE_ALIGN,
 3861					 NULL);
3862 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003863 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003864 ret = -ENOMEM;
3865 }
3866
3867 return ret;
3868}
3869
3870static inline int iommu_devinfo_cache_init(void)
3871{
3872 int ret = 0;
3873
3874 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3875 sizeof(struct device_domain_info),
3876 0,
3877 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003878 NULL);
3879 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003880 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003881 ret = -ENOMEM;
3882 }
3883
3884 return ret;
3885}
3886
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003887static int __init iommu_init_mempool(void)
3888{
3889 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003890 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003891 if (ret)
3892 return ret;
3893
3894 ret = iommu_domain_cache_init();
3895 if (ret)
3896 goto domain_error;
3897
3898 ret = iommu_devinfo_cache_init();
3899 if (!ret)
3900 return ret;
3901
3902 kmem_cache_destroy(iommu_domain_cache);
3903domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003904 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003905
3906 return -ENOMEM;
3907}
3908
3909static void __init iommu_exit_mempool(void)
3910{
3911 kmem_cache_destroy(iommu_devinfo_cache);
3912 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003913 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003914}
3915
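/*
 * The two slab caches above back the domain and device_domain_info
 * allocations made throughout this driver. A minimal sketch of the usage
 * pattern (the helper names are illustrative; the driver has its own
 * wrappers elsewhere in this file):
 */
static struct dmar_domain *example_alloc_domain_object(void)
{
	/* GFP_ATOMIC because several callers hold spinlocks */
	return kmem_cache_zalloc(iommu_domain_cache, GFP_ATOMIC);
}

static void example_free_domain_object(struct dmar_domain *domain)
{
	kmem_cache_free(iommu_domain_cache, domain);
}
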
Dan Williams556ab452010-07-23 15:47:56 -07003916static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3917{
3918 struct dmar_drhd_unit *drhd;
3919 u32 vtbar;
3920 int rc;
3921
3922 /* We know that this device on this chipset has its own IOMMU.
3923 * If we find it under a different IOMMU, then the BIOS is lying
3924 * to us. Hope that the IOMMU for this device is actually
3925 * disabled, and it needs no translation...
3926 */
3927 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3928 if (rc) {
3929 /* "can't" happen */
3930 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3931 return;
3932 }
3933 vtbar &= 0xffff0000;
3934
 3935	/* we know that this IOMMU should be at offset 0xa000 from vtbar */
3936 drhd = dmar_find_matched_drhd_unit(pdev);
3937 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3938 TAINT_FIRMWARE_WORKAROUND,
3939 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3940 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3941}
3942DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3943
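/*
 * The quirk above hooks the generic PCI fixup machinery. A hedged sketch of
 * the same pattern with a deliberately fake device ID (0x1234 is a
 * placeholder, not a real Intel part):
 */
static void example_vtd_quirk(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "example VT-d quirk ran at device enable\n");
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, 0x1234, example_vtd_quirk);
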
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003944static void __init init_no_remapping_devices(void)
3945{
3946 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003947 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003948 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003949
3950 for_each_drhd_unit(drhd) {
3951 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003952 for_each_active_dev_scope(drhd->devices,
3953 drhd->devices_cnt, i, dev)
3954 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003955 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003956 if (i == drhd->devices_cnt)
3957 drhd->ignored = 1;
3958 }
3959 }
3960
Jiang Liu7c919772014-01-06 14:18:18 +08003961 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003962 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003963 continue;
3964
Jiang Liub683b232014-02-19 14:07:32 +08003965 for_each_active_dev_scope(drhd->devices,
3966 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003967 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003968 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003969 if (i < drhd->devices_cnt)
3970 continue;
3971
David Woodhousec0771df2011-10-14 20:59:46 +01003972 /* This IOMMU has *only* gfx devices. Either bypass it or
3973 set the gfx_mapped flag, as appropriate */
3974 if (dmar_map_gfx) {
3975 intel_iommu_gfx_mapped = 1;
3976 } else {
3977 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003978 for_each_active_dev_scope(drhd->devices,
3979 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003980 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003981 }
3982 }
3983}
3984
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003985#ifdef CONFIG_SUSPEND
3986static int init_iommu_hw(void)
3987{
3988 struct dmar_drhd_unit *drhd;
3989 struct intel_iommu *iommu = NULL;
3990
3991 for_each_active_iommu(iommu, drhd)
3992 if (iommu->qi)
3993 dmar_reenable_qi(iommu);
3994
Joseph Cihulab7792602011-05-03 00:08:37 -07003995 for_each_iommu(iommu, drhd) {
3996 if (drhd->ignored) {
3997 /*
3998 * we always have to disable PMRs or DMA may fail on
3999 * this device
4000 */
4001 if (force_on)
4002 iommu_disable_protect_mem_regions(iommu);
4003 continue;
4004 }
4005
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004006 iommu_flush_write_buffer(iommu);
4007
4008 iommu_set_root_entry(iommu);
4009
4010 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004011 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08004012 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4013 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07004014 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004015 }
4016
4017 return 0;
4018}
4019
4020static void iommu_flush_all(void)
4021{
4022 struct dmar_drhd_unit *drhd;
4023 struct intel_iommu *iommu;
4024
4025 for_each_active_iommu(iommu, drhd) {
4026 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004027 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004028 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004029 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004030 }
4031}
4032
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004033static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004034{
4035 struct dmar_drhd_unit *drhd;
4036 struct intel_iommu *iommu = NULL;
4037 unsigned long flag;
4038
4039 for_each_active_iommu(iommu, drhd) {
Kees Cook6396bb22018-06-12 14:03:40 -07004040 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004041 GFP_ATOMIC);
4042 if (!iommu->iommu_state)
4043 goto nomem;
4044 }
4045
4046 iommu_flush_all();
4047
4048 for_each_active_iommu(iommu, drhd) {
4049 iommu_disable_translation(iommu);
4050
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004051 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004052
4053 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4054 readl(iommu->reg + DMAR_FECTL_REG);
4055 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4056 readl(iommu->reg + DMAR_FEDATA_REG);
4057 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4058 readl(iommu->reg + DMAR_FEADDR_REG);
4059 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4060 readl(iommu->reg + DMAR_FEUADDR_REG);
4061
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004062 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004063 }
4064 return 0;
4065
4066nomem:
4067 for_each_active_iommu(iommu, drhd)
4068 kfree(iommu->iommu_state);
4069
4070 return -ENOMEM;
4071}
4072
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004073static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004074{
4075 struct dmar_drhd_unit *drhd;
4076 struct intel_iommu *iommu = NULL;
4077 unsigned long flag;
4078
4079 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004080 if (force_on)
4081 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4082 else
4083 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004084 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004085 }
4086
4087 for_each_active_iommu(iommu, drhd) {
4088
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004089 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004090
4091 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4092 iommu->reg + DMAR_FECTL_REG);
4093 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4094 iommu->reg + DMAR_FEDATA_REG);
4095 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4096 iommu->reg + DMAR_FEADDR_REG);
4097 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4098 iommu->reg + DMAR_FEUADDR_REG);
4099
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004100 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004101 }
4102
4103 for_each_active_iommu(iommu, drhd)
4104 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004105}
4106
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004107static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004108 .resume = iommu_resume,
4109 .suspend = iommu_suspend,
4110};
4111
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004112static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004113{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004114 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004115}
4116
4117#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004118static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004119#endif /* CONFIG_PM */
4120
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004121
Jiang Liuc2a0b532014-11-09 22:47:56 +08004122int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004123{
4124 struct acpi_dmar_reserved_memory *rmrr;
Eric Auger0659b8d2017-01-19 20:57:53 +00004125 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004126 struct dmar_rmrr_unit *rmrru;
Eric Auger0659b8d2017-01-19 20:57:53 +00004127 size_t length;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004128
4129 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4130 if (!rmrru)
Eric Auger0659b8d2017-01-19 20:57:53 +00004131 goto out;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004132
4133 rmrru->hdr = header;
4134 rmrr = (struct acpi_dmar_reserved_memory *)header;
4135 rmrru->base_address = rmrr->base_address;
4136 rmrru->end_address = rmrr->end_address;
Eric Auger0659b8d2017-01-19 20:57:53 +00004137
4138 length = rmrr->end_address - rmrr->base_address + 1;
4139 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4140 IOMMU_RESV_DIRECT);
4141 if (!rmrru->resv)
4142 goto free_rmrru;
4143
Jiang Liu2e455282014-02-19 14:07:36 +08004144 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4145 ((void *)rmrr) + rmrr->header.length,
4146 &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004147 if (rmrru->devices_cnt && rmrru->devices == NULL)
4148 goto free_all;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004149
Jiang Liu2e455282014-02-19 14:07:36 +08004150 list_add(&rmrru->list, &dmar_rmrr_units);
4151
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004152 return 0;
Eric Auger0659b8d2017-01-19 20:57:53 +00004153free_all:
4154 kfree(rmrru->resv);
4155free_rmrru:
4156 kfree(rmrru);
4157out:
4158 return -ENOMEM;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004159}
4160
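/*
 * The RMRRs parsed above are exposed to the IOMMU core as direct-mapped
 * reserved regions (rmrru->resv). A sketch of how a consumer can enumerate
 * them through the generic API, assuming the iommu_get_resv_regions() /
 * iommu_put_resv_regions() interfaces provided by the IOMMU core of this era:
 */
static void example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv);

	iommu_get_resv_regions(dev, &resv);
	list_for_each_entry(region, &resv, list)
		dev_info(dev, "reserved region: %pa + %zu (type %d)\n",
			 &region->start, region->length, region->type);
	iommu_put_resv_regions(dev, &resv);
}
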
Jiang Liu6b197242014-11-09 22:47:58 +08004161static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4162{
4163 struct dmar_atsr_unit *atsru;
4164 struct acpi_dmar_atsr *tmp;
4165
4166 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4167 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4168 if (atsr->segment != tmp->segment)
4169 continue;
4170 if (atsr->header.length != tmp->header.length)
4171 continue;
4172 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4173 return atsru;
4174 }
4175
4176 return NULL;
4177}
4178
4179int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004180{
4181 struct acpi_dmar_atsr *atsr;
4182 struct dmar_atsr_unit *atsru;
4183
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004184 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
Jiang Liu6b197242014-11-09 22:47:58 +08004185 return 0;
4186
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004187 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004188 atsru = dmar_find_atsr(atsr);
4189 if (atsru)
4190 return 0;
4191
4192 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004193 if (!atsru)
4194 return -ENOMEM;
4195
Jiang Liu6b197242014-11-09 22:47:58 +08004196 /*
 4197	 * If the memory was allocated from the slab by the ACPI _DSM method,
 4198	 * we need to copy the contents because the memory buffer will be
 4199	 * freed on return.
4200 */
4201 atsru->hdr = (void *)(atsru + 1);
4202 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004203 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004204 if (!atsru->include_all) {
4205 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4206 (void *)atsr + atsr->header.length,
4207 &atsru->devices_cnt);
4208 if (atsru->devices_cnt && atsru->devices == NULL) {
4209 kfree(atsru);
4210 return -ENOMEM;
4211 }
4212 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004213
Jiang Liu0e242612014-02-19 14:07:34 +08004214 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004215
4216 return 0;
4217}
4218
Jiang Liu9bdc5312014-01-06 14:18:27 +08004219static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4220{
4221 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4222 kfree(atsru);
4223}
4224
Jiang Liu6b197242014-11-09 22:47:58 +08004225int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4226{
4227 struct acpi_dmar_atsr *atsr;
4228 struct dmar_atsr_unit *atsru;
4229
4230 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4231 atsru = dmar_find_atsr(atsr);
4232 if (atsru) {
4233 list_del_rcu(&atsru->list);
4234 synchronize_rcu();
4235 intel_iommu_free_atsr(atsru);
4236 }
4237
4238 return 0;
4239}
4240
4241int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4242{
4243 int i;
4244 struct device *dev;
4245 struct acpi_dmar_atsr *atsr;
4246 struct dmar_atsr_unit *atsru;
4247
4248 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4249 atsru = dmar_find_atsr(atsr);
4250 if (!atsru)
4251 return 0;
4252
Linus Torvalds194dc872016-07-27 20:03:31 -07004253 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08004254 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4255 i, dev)
4256 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07004257 }
Jiang Liu6b197242014-11-09 22:47:58 +08004258
4259 return 0;
4260}
4261
Jiang Liuffebeb42014-11-09 22:48:02 +08004262static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4263{
4264 int sp, ret = 0;
4265 struct intel_iommu *iommu = dmaru->iommu;
4266
4267 if (g_iommus[iommu->seq_id])
4268 return 0;
4269
4270 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004271 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004272 iommu->name);
4273 return -ENXIO;
4274 }
4275 if (!ecap_sc_support(iommu->ecap) &&
4276 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004277 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004278 iommu->name);
4279 return -ENXIO;
4280 }
4281 sp = domain_update_iommu_superpage(iommu) - 1;
4282 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004283 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004284 iommu->name);
4285 return -ENXIO;
4286 }
4287
4288 /*
4289 * Disable translation if already enabled prior to OS handover.
4290 */
4291 if (iommu->gcmd & DMA_GCMD_TE)
4292 iommu_disable_translation(iommu);
4293
4294 g_iommus[iommu->seq_id] = iommu;
4295 ret = iommu_init_domains(iommu);
4296 if (ret == 0)
4297 ret = iommu_alloc_root_entry(iommu);
4298 if (ret)
4299 goto out;
4300
David Woodhouse8a94ade2015-03-24 14:54:56 +00004301#ifdef CONFIG_INTEL_IOMMU_SVM
4302 if (pasid_enabled(iommu))
Lu Baolud9737952018-07-14 15:47:02 +08004303 intel_svm_init(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00004304#endif
4305
Jiang Liuffebeb42014-11-09 22:48:02 +08004306 if (dmaru->ignored) {
4307 /*
4308 * we always have to disable PMRs or DMA may fail on this device
4309 */
4310 if (force_on)
4311 iommu_disable_protect_mem_regions(iommu);
4312 return 0;
4313 }
4314
4315 intel_iommu_init_qi(iommu);
4316 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004317
4318#ifdef CONFIG_INTEL_IOMMU_SVM
4319 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4320 ret = intel_svm_enable_prq(iommu);
4321 if (ret)
4322 goto disable_iommu;
4323 }
4324#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004325 ret = dmar_set_interrupt(iommu);
4326 if (ret)
4327 goto disable_iommu;
4328
4329 iommu_set_root_entry(iommu);
4330 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4331 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4332 iommu_enable_translation(iommu);
4333
Jiang Liuffebeb42014-11-09 22:48:02 +08004334 iommu_disable_protect_mem_regions(iommu);
4335 return 0;
4336
4337disable_iommu:
4338 disable_dmar_iommu(iommu);
4339out:
4340 free_dmar_iommu(iommu);
4341 return ret;
4342}
4343
Jiang Liu6b197242014-11-09 22:47:58 +08004344int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4345{
Jiang Liuffebeb42014-11-09 22:48:02 +08004346 int ret = 0;
4347 struct intel_iommu *iommu = dmaru->iommu;
4348
4349 if (!intel_iommu_enabled)
4350 return 0;
4351 if (iommu == NULL)
4352 return -EINVAL;
4353
4354 if (insert) {
4355 ret = intel_iommu_add(dmaru);
4356 } else {
4357 disable_dmar_iommu(iommu);
4358 free_dmar_iommu(iommu);
4359 }
4360
4361 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004362}
4363
Jiang Liu9bdc5312014-01-06 14:18:27 +08004364static void intel_iommu_free_dmars(void)
4365{
4366 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4367 struct dmar_atsr_unit *atsru, *atsr_n;
4368
4369 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4370 list_del(&rmrru->list);
4371 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004372 kfree(rmrru->resv);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004373 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004374 }
4375
Jiang Liu9bdc5312014-01-06 14:18:27 +08004376 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4377 list_del(&atsru->list);
4378 intel_iommu_free_atsr(atsru);
4379 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004380}
4381
4382int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4383{
Jiang Liub683b232014-02-19 14:07:32 +08004384 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004385 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004386 struct pci_dev *bridge = NULL;
4387 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004388 struct acpi_dmar_atsr *atsr;
4389 struct dmar_atsr_unit *atsru;
4390
4391 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004392 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004393 bridge = bus->self;
David Woodhoused14053b32015-10-15 09:28:06 +01004394 /* If it's an integrated device, allow ATS */
4395 if (!bridge)
4396 return 1;
4397 /* Connected via non-PCIe: no ATS */
4398 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004399 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004400 return 0;
David Woodhoused14053b32015-10-15 09:28:06 +01004401 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004402 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004403 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004404 }
4405
Jiang Liu0e242612014-02-19 14:07:34 +08004406 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004407 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4408 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4409 if (atsr->segment != pci_domain_nr(dev->bus))
4410 continue;
4411
Jiang Liub683b232014-02-19 14:07:32 +08004412 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004413 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004414 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004415
4416 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004417 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004418 }
Jiang Liub683b232014-02-19 14:07:32 +08004419 ret = 0;
4420out:
Jiang Liu0e242612014-02-19 14:07:34 +08004421 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004422
Jiang Liub683b232014-02-19 14:07:32 +08004423 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004424}
4425
Jiang Liu59ce0512014-02-19 14:07:35 +08004426int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4427{
4428 int ret = 0;
4429 struct dmar_rmrr_unit *rmrru;
4430 struct dmar_atsr_unit *atsru;
4431 struct acpi_dmar_atsr *atsr;
4432 struct acpi_dmar_reserved_memory *rmrr;
4433
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004434 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
Jiang Liu59ce0512014-02-19 14:07:35 +08004435 return 0;
4436
4437 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4438 rmrr = container_of(rmrru->hdr,
4439 struct acpi_dmar_reserved_memory, header);
4440 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4441 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4442 ((void *)rmrr) + rmrr->header.length,
4443 rmrr->segment, rmrru->devices,
4444 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004445			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004446 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004447 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004448 dmar_remove_dev_scope(info, rmrr->segment,
4449 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004450 }
4451 }
4452
4453 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4454 if (atsru->include_all)
4455 continue;
4456
4457 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4458 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4459 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4460 (void *)atsr + atsr->header.length,
4461 atsr->segment, atsru->devices,
4462 atsru->devices_cnt);
4463 if (ret > 0)
4464 break;
 4465				else if (ret < 0)
4466 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004467 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004468 if (dmar_remove_dev_scope(info, atsr->segment,
4469 atsru->devices, atsru->devices_cnt))
4470 break;
4471 }
4472 }
4473
4474 return 0;
4475}
4476
Fenghua Yu99dcade2009-11-11 07:23:06 -08004477/*
 4478 * Here we only respond to a device being unbound from its driver.
 4479 *
 4480 * A newly added device is not attached to its DMAR domain here yet;
 4481 * that happens when the device is first mapped to an IOVA.
4482 */
4483static int device_notifier(struct notifier_block *nb,
4484 unsigned long action, void *data)
4485{
4486 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004487 struct dmar_domain *domain;
4488
David Woodhouse3d891942014-03-06 15:59:26 +00004489 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004490 return 0;
4491
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004492 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004493 return 0;
4494
David Woodhouse1525a292014-03-06 16:19:30 +00004495 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004496 if (!domain)
4497 return 0;
4498
Joerg Roedele6de0f82015-07-22 16:30:36 +02004499 dmar_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004500 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004501 domain_exit(domain);
Alex Williamsona97590e2011-03-04 14:52:16 -07004502
Fenghua Yu99dcade2009-11-11 07:23:06 -08004503 return 0;
4504}
4505
4506static struct notifier_block device_nb = {
4507 .notifier_call = device_notifier,
4508};
4509
Jiang Liu75f05562014-02-19 14:07:37 +08004510static int intel_iommu_memory_notifier(struct notifier_block *nb,
4511 unsigned long val, void *v)
4512{
4513 struct memory_notify *mhp = v;
4514 unsigned long long start, end;
4515 unsigned long start_vpfn, last_vpfn;
4516
4517 switch (val) {
4518 case MEM_GOING_ONLINE:
4519 start = mhp->start_pfn << PAGE_SHIFT;
4520 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4521 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004522 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004523 start, end);
4524 return NOTIFY_BAD;
4525 }
4526 break;
4527
4528 case MEM_OFFLINE:
4529 case MEM_CANCEL_ONLINE:
4530 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4531 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4532 while (start_vpfn <= last_vpfn) {
4533 struct iova *iova;
4534 struct dmar_drhd_unit *drhd;
4535 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004536 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004537
4538 iova = find_iova(&si_domain->iovad, start_vpfn);
4539 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004540 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004541 start_vpfn);
4542 break;
4543 }
4544
4545 iova = split_and_remove_iova(&si_domain->iovad, iova,
4546 start_vpfn, last_vpfn);
4547 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004548 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004549 start_vpfn, last_vpfn);
4550 return NOTIFY_BAD;
4551 }
4552
David Woodhouseea8ea462014-03-05 17:09:32 +00004553 freelist = domain_unmap(si_domain, iova->pfn_lo,
4554 iova->pfn_hi);
4555
Jiang Liu75f05562014-02-19 14:07:37 +08004556 rcu_read_lock();
4557 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004558 iommu_flush_iotlb_psi(iommu, si_domain,
Jiang Liua156ef92014-07-11 14:19:36 +08004559 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004560 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004561 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004562 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004563
4564 start_vpfn = iova->pfn_hi + 1;
4565 free_iova_mem(iova);
4566 }
4567 break;
4568 }
4569
4570 return NOTIFY_OK;
4571}
4572
4573static struct notifier_block intel_iommu_memory_nb = {
4574 .notifier_call = intel_iommu_memory_notifier,
4575 .priority = 0
4576};
4577
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004578static void free_all_cpu_cached_iovas(unsigned int cpu)
4579{
4580 int i;
4581
4582 for (i = 0; i < g_num_of_iommus; i++) {
4583 struct intel_iommu *iommu = g_iommus[i];
4584 struct dmar_domain *domain;
Aaron Campbell0caa7612016-07-02 21:23:24 -03004585 int did;
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004586
4587 if (!iommu)
4588 continue;
4589
Jan Niehusmann3bd4f912016-06-06 14:20:11 +02004590 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
Aaron Campbell0caa7612016-07-02 21:23:24 -03004591 domain = get_iommu_domain(iommu, (u16)did);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004592
4593 if (!domain)
4594 continue;
4595 free_cpu_cached_iovas(cpu, &domain->iovad);
4596 }
4597 }
4598}
4599
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004600static int intel_iommu_cpu_dead(unsigned int cpu)
Omer Pelegaa473242016-04-20 11:33:02 +03004601{
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004602 free_all_cpu_cached_iovas(cpu);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004603 return 0;
Omer Pelegaa473242016-04-20 11:33:02 +03004604}
4605
Joerg Roedel161b28a2017-03-28 17:04:52 +02004606static void intel_disable_iommus(void)
4607{
4608 struct intel_iommu *iommu = NULL;
4609 struct dmar_drhd_unit *drhd;
4610
4611 for_each_iommu(iommu, drhd)
4612 iommu_disable_translation(iommu);
4613}
4614
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004615static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4616{
Joerg Roedel2926a2aa2017-08-14 17:19:26 +02004617 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4618
4619 return container_of(iommu_dev, struct intel_iommu, iommu);
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004620}
4621
Alex Williamsona5459cf2014-06-12 16:12:31 -06004622static ssize_t intel_iommu_show_version(struct device *dev,
4623 struct device_attribute *attr,
4624 char *buf)
4625{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004626 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004627 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4628 return sprintf(buf, "%d:%d\n",
4629 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4630}
4631static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4632
4633static ssize_t intel_iommu_show_address(struct device *dev,
4634 struct device_attribute *attr,
4635 char *buf)
4636{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004637 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004638 return sprintf(buf, "%llx\n", iommu->reg_phys);
4639}
4640static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4641
4642static ssize_t intel_iommu_show_cap(struct device *dev,
4643 struct device_attribute *attr,
4644 char *buf)
4645{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004646 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004647 return sprintf(buf, "%llx\n", iommu->cap);
4648}
4649static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4650
4651static ssize_t intel_iommu_show_ecap(struct device *dev,
4652 struct device_attribute *attr,
4653 char *buf)
4654{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004655 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004656 return sprintf(buf, "%llx\n", iommu->ecap);
4657}
4658static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4659
Alex Williamson2238c082015-07-14 15:24:53 -06004660static ssize_t intel_iommu_show_ndoms(struct device *dev,
4661 struct device_attribute *attr,
4662 char *buf)
4663{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004664 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004665 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4666}
4667static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4668
4669static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4670 struct device_attribute *attr,
4671 char *buf)
4672{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004673 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004674 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4675 cap_ndoms(iommu->cap)));
4676}
4677static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4678
Alex Williamsona5459cf2014-06-12 16:12:31 -06004679static struct attribute *intel_iommu_attrs[] = {
4680 &dev_attr_version.attr,
4681 &dev_attr_address.attr,
4682 &dev_attr_cap.attr,
4683 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004684 &dev_attr_domains_supported.attr,
4685 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004686 NULL,
4687};
4688
4689static struct attribute_group intel_iommu_group = {
4690 .name = "intel-iommu",
4691 .attrs = intel_iommu_attrs,
4692};
4693
4694const struct attribute_group *intel_iommu_groups[] = {
4695 &intel_iommu_group,
4696 NULL,
4697};
4698
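/*
 * The attribute group above is attached to the per-unit IOMMU class device
 * registered from intel_iommu_init() below (iommu_device_sysfs_add() with
 * iommu->name, which DMAR sets to "dmar<N>"). On a typical system the files
 * are therefore expected to appear as, e.g.
 *   /sys/class/iommu/dmar0/intel-iommu/{version,address,cap,ecap,
 *                                       domains_supported,domains_used}
 * (illustrative paths, not guaranteed on every platform).
 */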
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004699int __init intel_iommu_init(void)
4700{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004701 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004702 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004703 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004704
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004705 /* VT-d is required for a TXT/tboot launch, so enforce that */
4706 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004707
Jiang Liu3a5670e2014-02-19 14:07:33 +08004708 if (iommu_init_mempool()) {
4709 if (force_on)
4710 panic("tboot: Failed to initialize iommu memory\n");
4711 return -ENOMEM;
4712 }
4713
4714 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004715 if (dmar_table_init()) {
4716 if (force_on)
4717 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004718 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004719 }
4720
Suresh Siddhac2c72862011-08-23 17:05:19 -07004721 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004722 if (force_on)
4723 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004724 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004725 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004726
Joerg Roedelec154bf2017-10-06 15:00:53 +02004727 up_write(&dmar_global_lock);
4728
4729 /*
4730 * The bus notifier takes the dmar_global_lock, so lockdep will
4731 * complain later when we register it under the lock.
4732 */
4733 dmar_register_bus_notifier();
4734
4735 down_write(&dmar_global_lock);
4736
Joerg Roedel161b28a2017-03-28 17:04:52 +02004737 if (no_iommu || dmar_disabled) {
4738 /*
Shaohua Libfd20f12017-04-26 09:18:35 -07004739		 * We exit the function here to ensure the IOMMU's remapping and
 4740		 * mempool aren't set up, which means that the IOMMU's PMRs
 4741		 * won't be disabled via the call to init_dmars(). So disable
 4742		 * them explicitly here. The PMRs were set up by tboot prior to
 4743		 * calling SENTER, but the kernel is expected to reset/tear
 4744		 * down the PMRs.
4745 */
4746 if (intel_iommu_tboot_noforce) {
4747 for_each_iommu(iommu, drhd)
4748 iommu_disable_protect_mem_regions(iommu);
4749 }
4750
4751 /*
Joerg Roedel161b28a2017-03-28 17:04:52 +02004752 * Make sure the IOMMUs are switched off, even when we
4753 * boot into a kexec kernel and the previous kernel left
4754 * them enabled
4755 */
4756 intel_disable_iommus();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004757 goto out_free_dmar;
Joerg Roedel161b28a2017-03-28 17:04:52 +02004758 }
Suresh Siddha2ae21012008-07-10 11:16:43 -07004759
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004760 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004761 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004762
4763 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004764 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004765
Joseph Cihula51a63e62011-03-21 11:04:24 -07004766 if (dmar_init_reserved_ranges()) {
4767 if (force_on)
4768 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004769 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004770 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004771
4772 init_no_remapping_devices();
4773
Joseph Cihulab7792602011-05-03 00:08:37 -07004774 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004775 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004776 if (force_on)
4777 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004778 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004779 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004780 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004781 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004782 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004783
Christoph Hellwig4fac8072017-12-24 13:57:08 +01004784#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004785 swiotlb = 0;
4786#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004787 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004788
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004789 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004790
Joerg Roedel39ab9552017-02-01 16:56:46 +01004791 for_each_active_iommu(iommu, drhd) {
4792 iommu_device_sysfs_add(&iommu->iommu, NULL,
4793 intel_iommu_groups,
4794 "%s", iommu->name);
4795 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4796 iommu_device_register(&iommu->iommu);
4797 }
Alex Williamsona5459cf2014-06-12 16:12:31 -06004798
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004799 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004800 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004801 if (si_domain && !hw_pass_through)
4802 register_memory_notifier(&intel_iommu_memory_nb);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004803 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4804 intel_iommu_cpu_dead);
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004805 intel_iommu_enabled = 1;
4806
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004807 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004808
4809out_free_reserved_range:
4810 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004811out_free_dmar:
4812 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004813 up_write(&dmar_global_lock);
4814 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004815 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004816}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004817
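/*
 * For reference, the init path above is steered by boot parameters parsed
 * earlier in this file, for example intel_iommu=on / intel_iommu=off,
 * intel_iommu=strict (synchronous IOTLB flushing in intel_unmap()) and
 * intel_iommu=tboot_noforce (sets intel_iommu_tboot_noforce, checked above).
 * This list is illustrative; Documentation/admin-guide/kernel-parameters.txt
 * is the authoritative reference.
 */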
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004818static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
Alex Williamson579305f2014-07-03 09:51:43 -06004819{
4820 struct intel_iommu *iommu = opaque;
4821
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004822 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06004823 return 0;
4824}
4825
4826/*
4827 * NB - intel-iommu lacks any sort of reference counting for the users of
4828 * dependent devices. If multiple endpoints have intersecting dependent
4829 * devices, unbinding the driver from any one of them will possibly leave
4830 * the others unable to operate.
4831 */
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004832static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004833{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004834 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004835 return;
4836
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004837 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004838}
4839
Joerg Roedel127c7612015-07-23 17:44:46 +02004840static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004841{
Weidong Hanc7151a82008-12-08 22:51:37 +08004842 struct intel_iommu *iommu;
4843 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08004844
Joerg Roedel55d94042015-07-22 16:50:40 +02004845 assert_spin_locked(&device_domain_lock);
4846
Joerg Roedelb608ac32015-07-21 18:19:08 +02004847 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08004848 return;
4849
Joerg Roedel127c7612015-07-23 17:44:46 +02004850 iommu = info->iommu;
4851
4852 if (info->dev) {
4853 iommu_disable_dev_iotlb(info);
4854 domain_context_clear(iommu, info->dev);
Lu Baolua7fc93f2018-07-14 15:47:00 +08004855 intel_pasid_free_table(info->dev);
Joerg Roedel127c7612015-07-23 17:44:46 +02004856 }
4857
Joerg Roedelb608ac32015-07-21 18:19:08 +02004858 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07004859
Joerg Roedeld160aca2015-07-22 11:52:53 +02004860 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004861 domain_detach_iommu(info->domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004862 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004863
4864 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004865}
4866
Joerg Roedel55d94042015-07-22 16:50:40 +02004867static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4868 struct device *dev)
4869{
Joerg Roedel127c7612015-07-23 17:44:46 +02004870 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02004871 unsigned long flags;
4872
Weidong Hanc7151a82008-12-08 22:51:37 +08004873 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004874 info = dev->archdata.iommu;
4875 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004876 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004877}
4878
4879static int md_domain_init(struct dmar_domain *domain, int guest_width)
4880{
4881 int adjust_width;
4882
Zhen Leiaa3ac942017-09-21 16:52:45 +01004883 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004884 domain_reserve_special_ranges(domain);
4885
4886 /* calculate AGAW */
4887 domain->gaw = guest_width;
4888 adjust_width = guestwidth_to_adjustwidth(guest_width);
4889 domain->agaw = width_to_agaw(adjust_width);
4890
Weidong Han5e98c4b2008-12-08 23:03:27 +08004891 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004892 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004893 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004894 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004895
4896 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004897 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004898 if (!domain->pgd)
4899 return -ENOMEM;
4900 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4901 return 0;
4902}
4903
Joerg Roedel00a77de2015-03-26 13:43:08 +01004904static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004905{
Joerg Roedel5d450802008-12-03 14:52:32 +01004906 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004907 struct iommu_domain *domain;
4908
4909 if (type != IOMMU_DOMAIN_UNMANAGED)
4910 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004911
Jiang Liuab8dfe22014-07-11 14:19:27 +08004912 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004913 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004914 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004915 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004916 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004917 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004918 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004919 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004920 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004921 }
Allen Kay8140a952011-10-14 12:32:17 -07004922 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004923
Joerg Roedel00a77de2015-03-26 13:43:08 +01004924 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004925 domain->geometry.aperture_start = 0;
4926 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4927 domain->geometry.force_aperture = true;
4928
Joerg Roedel00a77de2015-03-26 13:43:08 +01004929 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004930}
Kay, Allen M38717942008-09-09 18:37:29 +03004931
Joerg Roedel00a77de2015-03-26 13:43:08 +01004932static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004933{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004934 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004935}
Kay, Allen M38717942008-09-09 18:37:29 +03004936
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004937static int intel_iommu_attach_device(struct iommu_domain *domain,
4938 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004939{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004940 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004941 struct intel_iommu *iommu;
4942 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004943 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004944
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004945 if (device_is_rmrr_locked(dev)) {
4946 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4947 return -EPERM;
4948 }
4949
David Woodhouse7207d8f2014-03-09 16:31:06 -07004950 /* normally dev is not mapped */
4951 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004952 struct dmar_domain *old_domain;
4953
David Woodhouse1525a292014-03-06 16:19:30 +00004954 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004955 if (old_domain) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02004956 rcu_read_lock();
Joerg Roedelde7e8882015-07-22 11:58:07 +02004957 dmar_remove_one_dev_info(old_domain, dev);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004958 rcu_read_unlock();
Joerg Roedel62c22162014-12-09 12:56:45 +01004959
4960 if (!domain_type_is_vm_or_si(old_domain) &&
4961 list_empty(&old_domain->devices))
4962 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004963 }
4964 }
4965
David Woodhouse156baca2014-03-09 14:00:57 -07004966 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004967 if (!iommu)
4968 return -ENODEV;
4969
4970 /* check if this iommu agaw is sufficient for max mapped address */
4971 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004972 if (addr_width > cap_mgaw(iommu->cap))
4973 addr_width = cap_mgaw(iommu->cap);
4974
4975 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004976		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004978		       __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004979 return -EFAULT;
4980 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004981 dmar_domain->gaw = addr_width;
4982
4983 /*
4984 * Knock out extra levels of page tables if necessary
4985 */
4986 while (iommu->agaw < dmar_domain->agaw) {
4987 struct dma_pte *pte;
4988
4989 pte = dmar_domain->pgd;
4990 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004991 dmar_domain->pgd = (struct dma_pte *)
4992 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004993 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004994 }
4995 dmar_domain->agaw--;
4996 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004997
Joerg Roedel28ccce02015-07-21 14:45:31 +02004998 return domain_add_dev_info(dmar_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004999}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005000
Joerg Roedel4c5478c2008-12-03 14:58:24 +01005001static void intel_iommu_detach_device(struct iommu_domain *domain,
5002 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03005003{
Joerg Roedele6de0f82015-07-22 16:30:36 +02005004 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03005005}
Kay, Allen M38717942008-09-09 18:37:29 +03005006
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005007static int intel_iommu_map(struct iommu_domain *domain,
5008 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02005009 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03005010{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005011 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005012 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005013 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005014 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005015
Joerg Roedeldde57a22008-12-03 15:04:09 +01005016 if (iommu_prot & IOMMU_READ)
5017 prot |= DMA_PTE_READ;
5018 if (iommu_prot & IOMMU_WRITE)
5019 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08005020 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5021 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005022
David Woodhouse163cc522009-06-28 00:51:17 +01005023 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005024 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005025 u64 end;
5026
5027 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01005028 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005029 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005030			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01005032			       __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005033 return -EFAULT;
5034 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01005035 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005036 }
David Woodhousead051222009-06-28 14:22:28 +01005037	/* Round size up to the next multiple of PAGE_SIZE if it, together
5038	   with the low bits of hpa, would spill onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01005039 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01005040 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5041 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005042 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03005043}
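/*
 * Illustrative sketch, not part of the driver: the aligned_nrpages()
 * rounding used above.  A 0x1000-byte mapping whose host address ends in
 * 0x800 crosses a page boundary and therefore needs two 4KiB pages.  A
 * minimal equivalent, assuming 4KiB VT-d pages:
 */
static inline unsigned long example_nrpages(unsigned long hpa, size_t size)
{
	/* keep only the offset within the first page, then round up */
	return ALIGN((hpa & ~VTD_PAGE_MASK) + size, VTD_PAGE_SIZE) >>
		VTD_PAGE_SHIFT;
}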
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005044
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02005045static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00005046 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005047{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005048 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00005049 struct page *freelist = NULL;
David Woodhouseea8ea462014-03-05 17:09:32 +00005050 unsigned long start_pfn, last_pfn;
5051 unsigned int npages;
Joerg Roedel42e8c182015-07-21 15:50:02 +02005052 int iommu_id, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01005053
David Woodhouse5cf0a762014-03-19 16:07:49 +00005054 /* Cope with horrid API which requires us to unmap more than the
5055 size argument if it happens to be a large-page mapping. */
Joerg Roedeldc02e462015-08-13 11:15:13 +02005056 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
David Woodhouse5cf0a762014-03-19 16:07:49 +00005057
5058 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5059 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5060
David Woodhouseea8ea462014-03-05 17:09:32 +00005061 start_pfn = iova >> VTD_PAGE_SHIFT;
5062 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5063
5064 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5065
5066 npages = last_pfn - start_pfn + 1;
5067
Shaokun Zhangf746a022018-03-22 18:18:06 +08005068 for_each_domain_iommu(iommu_id, dmar_domain)
Joerg Roedel42e8c182015-07-21 15:50:02 +02005069 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5070 start_pfn, npages, !freelist, 0);
David Woodhouseea8ea462014-03-05 17:09:32 +00005071
5072 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005073
David Woodhouse163cc522009-06-28 00:51:17 +01005074 if (dmar_domain->max_addr == iova + size)
5075 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005076
David Woodhouse5cf0a762014-03-19 16:07:49 +00005077 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005078}
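/*
 * Illustrative sketch, not part of the driver: the unmap path above rounds
 * the request up to the region covered by the PTE it finds, because a
 * superpage cannot be partially unmapped here.  With 9 address bits per
 * table level that is 4KiB, 2MiB and 1GiB for levels 1, 2 and 3:
 */
static inline u64 example_level_size(int level)
{
	return (u64)VTD_PAGE_SIZE << (9 * (level - 1));
}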
Kay, Allen M38717942008-09-09 18:37:29 +03005079
Joerg Roedeld14d6572008-12-03 15:06:57 +01005080static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05305081 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03005082{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005083 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03005084 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00005085 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005086 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03005087
David Woodhouse5cf0a762014-03-19 16:07:49 +00005088 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03005089 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005090 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03005091
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005092 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03005093}
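/*
 * Illustrative sketch, not part of the driver: the callback above returns
 * the address stored in the leaf PTE, i.e. a page-aligned value.  A
 * hypothetical caller wanting the exact physical address of a 4KiB mapping
 * would add the in-page offset back via the generic API:
 */
static inline phys_addr_t example_translate(struct iommu_domain *dom,
					    dma_addr_t iova)
{
	phys_addr_t base = iommu_iova_to_phys(dom, iova);

	return base ? base + (iova & ~VTD_PAGE_MASK) : 0;
}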
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005094
Joerg Roedel5d587b82014-09-05 10:50:45 +02005095static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005096{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005097 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005098 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04005099 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005100 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005101
Joerg Roedel5d587b82014-09-05 10:50:45 +02005102 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005103}
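/*
 * Illustrative sketch, not part of the driver: consumers such as VFIO query
 * these bits through the generic helper rather than calling the callback
 * directly.  A minimal snoop-coherency check might look like:
 */
static inline bool example_pci_snoop_coherent(void)
{
	return iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
}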
5104
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005105static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04005106{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005107 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005108 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07005109 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04005110
Alex Williamsona5459cf2014-06-12 16:12:31 -06005111 iommu = device_to_iommu(dev, &bus, &devfn);
5112 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04005113 return -ENODEV;
5114
Joerg Roedele3d10af2017-02-01 17:23:22 +01005115 iommu_device_link(&iommu->iommu, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005116
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005117 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06005118
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005119 if (IS_ERR(group))
5120 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04005121
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005122 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005123 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005124}
5125
5126static void intel_iommu_remove_device(struct device *dev)
5127{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005128 struct intel_iommu *iommu;
5129 u8 bus, devfn;
5130
5131 iommu = device_to_iommu(dev, &bus, &devfn);
5132 if (!iommu)
5133 return;
5134
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005135 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06005136
Joerg Roedele3d10af2017-02-01 17:23:22 +01005137 iommu_device_unlink(&iommu->iommu, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04005138}
5139
Eric Auger0659b8d2017-01-19 20:57:53 +00005140static void intel_iommu_get_resv_regions(struct device *device,
5141 struct list_head *head)
5142{
5143 struct iommu_resv_region *reg;
5144 struct dmar_rmrr_unit *rmrr;
5145 struct device *i_dev;
5146 int i;
5147
5148 rcu_read_lock();
5149 for_each_rmrr_units(rmrr) {
5150 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5151 i, i_dev) {
5152 if (i_dev != device)
5153 continue;
5154
5155 list_add_tail(&rmrr->resv->list, head);
5156 }
5157 }
5158 rcu_read_unlock();
5159
5160 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5161 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00005162 0, IOMMU_RESV_MSI);
Eric Auger0659b8d2017-01-19 20:57:53 +00005163 if (!reg)
5164 return;
5165 list_add_tail(&reg->list, head);
5166}
5167
5168static void intel_iommu_put_resv_regions(struct device *dev,
5169 struct list_head *head)
5170{
5171 struct iommu_resv_region *entry, *next;
5172
5173 list_for_each_entry_safe(entry, next, head, list) {
5174 if (entry->type == IOMMU_RESV_RESERVED)
5175 kfree(entry);
5176 }
Kay, Allen M38717942008-09-09 18:37:29 +03005177}
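/*
 * Illustrative sketch, not part of the driver: a consumer retrieves the
 * regions exposed above (the device's RMRRs plus the MSI window starting at
 * IOAPIC_RANGE_START) through the generic helpers and must keep its IOVA
 * allocations clear of them.  Hypothetical dump routine:
 */
static void __maybe_unused example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved: %pa + %zu (type %d)\n",
			 &region->start, region->length, region->type);
	iommu_put_resv_regions(dev, &resv_regions);
}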
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005178
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005179#ifdef CONFIG_INTEL_IOMMU_SVM
Jacob Pan65ca7f52016-12-06 10:14:23 -08005180#define MAX_NR_PASID_BITS (20)
Lu Baolu4774cc52018-07-14 15:47:01 +08005181static inline unsigned long intel_iommu_get_pts(struct device *dev)
Jacob Pan65ca7f52016-12-06 10:14:23 -08005182{
Lu Baolu4774cc52018-07-14 15:47:01 +08005183 int pts, max_pasid;
5184
5185 max_pasid = intel_pasid_get_dev_max_id(dev);
5186 pts = find_first_bit((unsigned long *)&max_pasid, MAX_NR_PASID_BITS);
5187 if (pts < 5)
Jacob Pan65ca7f52016-12-06 10:14:23 -08005188 return 0;
5189
Lu Baolu4774cc52018-07-14 15:47:01 +08005190 return pts - 5;
Jacob Pan65ca7f52016-12-06 10:14:23 -08005191}
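/*
 * Illustrative sketch, not part of the driver: the value computed above is
 * the PASID Table Size (PTS) field, which encodes a table of 2^(PTS + 5)
 * entries.  Assuming the maximum PASID count is a power of two, a table for
 * 2^20 PASIDs (only bit 20 set) yields PTS = 15, i.e. 2^(15 + 5) entries.
 */
static inline unsigned long example_pts_to_entries(unsigned long pts)
{
	return 1UL << (pts + 5);
}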
5192
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005193int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5194{
5195 struct device_domain_info *info;
5196 struct context_entry *context;
5197 struct dmar_domain *domain;
5198 unsigned long flags;
5199 u64 ctx_lo;
5200 int ret;
5201
5202 domain = get_valid_domain_for_dev(sdev->dev);
5203 if (!domain)
5204 return -EINVAL;
5205
5206 spin_lock_irqsave(&device_domain_lock, flags);
5207 spin_lock(&iommu->lock);
5208
5209 ret = -EINVAL;
5210 info = sdev->dev->archdata.iommu;
5211 if (!info || !info->pasid_supported)
5212 goto out;
5213
5214 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5215 if (WARN_ON(!context))
5216 goto out;
5217
5218 ctx_lo = context[0].lo;
5219
5220 sdev->did = domain->iommu_did[iommu->seq_id];
5221 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5222
5223 if (!(ctx_lo & CONTEXT_PASIDE)) {
Ashok Raj11b93eb2017-08-08 13:29:28 -07005224 if (iommu->pasid_state_table)
5225 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
Lu Baolu4774cc52018-07-14 15:47:01 +08005226 context[1].lo = (u64)virt_to_phys(info->pasid_table->table) |
5227 intel_iommu_get_pts(sdev->dev);
Jacob Pan65ca7f52016-12-06 10:14:23 -08005228
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005229 wmb();
5230 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5231 * extended to permit requests-with-PASID if the PASIDE bit
5232		 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5233		 * however, the PASIDE bit is ignored and requests-with-PASID
5234		 * are unconditionally blocked, which makes less sense.
5235 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5236 * "guest mode" translation types depending on whether ATS
5237 * is available or not. Annoyingly, we can't use the new
5238 * modes *unless* PASIDE is set. */
5239 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5240 ctx_lo &= ~CONTEXT_TT_MASK;
5241 if (info->ats_supported)
5242 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5243 else
5244 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5245 }
5246 ctx_lo |= CONTEXT_PASIDE;
David Woodhouse907fea32015-10-13 14:11:13 +01005247 if (iommu->pasid_state_table)
5248 ctx_lo |= CONTEXT_DINVE;
David Woodhousea222a7f2015-10-07 23:35:18 +01005249 if (info->pri_supported)
5250 ctx_lo |= CONTEXT_PRS;
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005251 context[0].lo = ctx_lo;
5252 wmb();
5253 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5254 DMA_CCMD_MASK_NOBIT,
5255 DMA_CCMD_DEVICE_INVL);
5256 }
5257
5258 /* Enable PASID support in the device, if it wasn't already */
5259 if (!info->pasid_enabled)
5260 iommu_enable_dev_iotlb(info);
5261
5262 if (info->ats_enabled) {
5263 sdev->dev_iotlb = 1;
5264 sdev->qdep = info->ats_qdep;
5265 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5266 sdev->qdep = 0;
5267 }
5268 ret = 0;
5269
5270 out:
5271 spin_unlock(&iommu->lock);
5272 spin_unlock_irqrestore(&device_domain_lock, flags);
5273
5274 return ret;
5275}
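/*
 * Illustrative sketch, not part of the driver: the wmb() pairing above is
 * the usual pattern for updating a multi-qword descriptor that the hardware
 * may walk concurrently: publish the auxiliary qword first, then the qword
 * carrying the enable bits, then invalidate the context cache so the IOMMU
 * re-reads the entry.  Generic shape of that ordering:
 */
static void __maybe_unused example_publish_desc(u64 *desc, u64 hi, u64 lo)
{
	desc[1] = hi;	/* auxiliary fields first */
	wmb();		/* ordered before the qword hardware validates */
	desc[0] = lo;	/* enable bits last */
	wmb();		/* visible before any invalidation request */
}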
5276
5277struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5278{
5279 struct intel_iommu *iommu;
5280 u8 bus, devfn;
5281
5282 if (iommu_dummy(dev)) {
5283 dev_warn(dev,
5284 "No IOMMU translation for device; cannot enable SVM\n");
5285 return NULL;
5286 }
5287
5288 iommu = device_to_iommu(dev, &bus, &devfn);
5289	if (!iommu) {
Sudeep Duttb9997e32015-10-18 20:54:37 -07005290 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005291 return NULL;
5292 }
5293
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005294 return iommu;
5295}
5296#endif /* CONFIG_INTEL_IOMMU_SVM */
5297
Joerg Roedelb0119e82017-02-01 13:23:08 +01005298const struct iommu_ops intel_iommu_ops = {
Eric Auger0659b8d2017-01-19 20:57:53 +00005299 .capable = intel_iommu_capable,
5300 .domain_alloc = intel_iommu_domain_alloc,
5301 .domain_free = intel_iommu_domain_free,
5302 .attach_dev = intel_iommu_attach_device,
5303 .detach_dev = intel_iommu_detach_device,
5304 .map = intel_iommu_map,
5305 .unmap = intel_iommu_unmap,
5306 .map_sg = default_iommu_map_sg,
5307 .iova_to_phys = intel_iommu_iova_to_phys,
5308 .add_device = intel_iommu_add_device,
5309 .remove_device = intel_iommu_remove_device,
5310 .get_resv_regions = intel_iommu_get_resv_regions,
5311 .put_resv_regions = intel_iommu_put_resv_regions,
5312 .device_group = pci_device_group,
5313 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005314};
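/*
 * Illustrative sketch, not part of the driver: these ops are only reached
 * through the generic IOMMU API.  A hypothetical consumer mapping a single
 * page for a PCI device might do roughly the following (around this kernel
 * version iommu_map() takes domain, iova, paddr, size, prot):
 */
static int __maybe_unused example_map_one_page(struct device *dev,
					       unsigned long iova,
					       phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&pci_bus_type);	/* -> domain_alloc */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);		/* -> attach_dev */
	if (ret)
		goto out_free;

	ret = iommu_map(domain, iova, paddr, VTD_PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);	/* -> map */
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}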
David Woodhouse9af88142009-02-13 23:18:03 +00005315
Daniel Vetter94526182013-01-20 23:50:13 +01005316static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5317{
5318 /* G4x/GM45 integrated gfx dmar support is totally busted. */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005319 pr_info("Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01005320 dmar_map_gfx = 0;
5321}
5322
5323DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5324DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5325DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5326DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5327DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5328DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5329DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5330
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08005331static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00005332{
5333 /*
5334 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01005335 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00005336 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005337 pr_info("Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00005338 rwbf_quirk = 1;
5339}
5340
5341DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01005342DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5343DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5344DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5345DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5346DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5347DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07005348
Adam Jacksoneecfd572010-08-25 21:17:34 +01005349#define GGC 0x52
5350#define GGC_MEMORY_SIZE_MASK (0xf << 8)
5351#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5352#define GGC_MEMORY_SIZE_1M (0x1 << 8)
5353#define GGC_MEMORY_SIZE_2M (0x3 << 8)
5354#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5355#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5356#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5357#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
5358
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08005359static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01005360{
5361 unsigned short ggc;
5362
Adam Jacksoneecfd572010-08-25 21:17:34 +01005363 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01005364 return;
5365
Adam Jacksoneecfd572010-08-25 21:17:34 +01005366 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005367 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
David Woodhouse9eecabc2010-09-21 22:28:23 +01005368 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07005369 } else if (dmar_map_gfx) {
5370 /* we have to ensure the gfx device is idle before we flush */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005371 pr_info("Disabling batched IOTLB flush on Ironlake\n");
David Woodhouse6fbcfb32011-09-25 19:11:14 -07005372 intel_iommu_strict = 1;
5373 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01005374}
5375DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5376DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5377DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5378DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
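/*
 * Illustrative sketch, not part of the driver: the GGC word read above keeps
 * a memory-size code in bits 11:8; codes with bit 11 set (the _VT_ENABLED
 * mask) mean the BIOS allocated a shadow GTT for VT-d.  A hypothetical GGC
 * value of 0x0300 therefore decodes as "2M, no shadow GTT", which is exactly
 * the case the quirk above has to disable graphics translation for.
 */
static inline bool example_ggc_vt_enabled(unsigned short ggc)
{
	return (ggc & GGC_MEMORY_VT_ENABLED) != 0;
}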
5379
David Woodhousee0fc7e02009-09-30 09:12:17 -07005380/* On Tylersburg chipsets, some BIOSes have been known to enable the
5381 ISOCH DMAR unit for the Azalia sound device, but not give it any
5382 TLB entries, which causes it to deadlock. Check for that. We do
5383 this in a function called from init_dmars(), instead of in a PCI
5384 quirk, because we don't want to print the obnoxious "BIOS broken"
5385 message if VT-d is actually disabled.
5386*/
5387static void __init check_tylersburg_isoch(void)
5388{
5389 struct pci_dev *pdev;
5390 uint32_t vtisochctrl;
5391
5392 /* If there's no Azalia in the system anyway, forget it. */
5393 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5394 if (!pdev)
5395 return;
5396 pci_dev_put(pdev);
5397
5398 /* System Management Registers. Might be hidden, in which case
5399 we can't do the sanity check. But that's OK, because the
5400 known-broken BIOSes _don't_ actually hide it, so far. */
5401 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5402 if (!pdev)
5403 return;
5404
5405 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5406 pci_dev_put(pdev);
5407 return;
5408 }
5409
5410 pci_dev_put(pdev);
5411
5412 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5413 if (vtisochctrl & 1)
5414 return;
5415
5416 /* Drop all bits other than the number of TLB entries */
5417 vtisochctrl &= 0x1c;
5418
5419 /* If we have the recommended number of TLB entries (16), fine. */
5420 if (vtisochctrl == 0x10)
5421 return;
5422
5423 /* Zero TLB entries? You get to ride the short bus to school. */
5424 if (!vtisochctrl) {
5425 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5426 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5427 dmi_get_system_info(DMI_BIOS_VENDOR),
5428 dmi_get_system_info(DMI_BIOS_VERSION),
5429 dmi_get_system_info(DMI_PRODUCT_VERSION));
5430 iommu_identity_mapping |= IDENTMAP_AZALIA;
5431 return;
5432 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005433
5434 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
David Woodhousee0fc7e02009-09-30 09:12:17 -07005435 vtisochctrl);
5436}
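/*
 * Illustrative sketch, not part of the driver: the checks above read
 * VTISOCHCTRL as "bit 0 set means Azalia is routed to the non-isoch DMAR
 * unit" and treat the value masked with 0x1c as the isoch unit's TLB
 * allocation, 0x10 (16 entries) being the recommended setting.  A
 * hypothetical decoder of that register:
 */
static inline int example_isoch_tlb_entries(u32 vtisochctrl)
{
	if (vtisochctrl & 1)
		return -1;	/* isoch unit not used for Azalia */
	return vtisochctrl & 0x1c;
}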