blob: 277838dbc3a628872f85a442e2ea8e2e38d6469e [file] [log] [blame]
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +02001/*
Joerg Roedel5d0d7152010-10-13 11:13:21 +02002 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
Joerg Roedel63ce3ae2015-02-04 16:12:55 +01003 * Author: Joerg Roedel <jroedel@suse.de>
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +02004 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/pci.h>
21#include <linux/acpi.h>
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020022#include <linux/list.h>
Baoquan He5c87f622016-09-15 16:50:51 +080023#include <linux/bitmap.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090024#include <linux/slab.h>
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +010025#include <linux/syscore_ops.h>
Joerg Roedela80dc3e2008-09-11 16:51:41 +020026#include <linux/interrupt.h>
27#include <linux/msi.h>
Joerg Roedel403f81d2011-06-14 16:44:25 +020028#include <linux/amd-iommu.h>
Joerg Roedel400a28a2011-11-28 15:11:02 +010029#include <linux/export.h>
Alex Williamson066f2e92014-06-12 16:12:37 -060030#include <linux/iommu.h>
Lucas Stachebcfa282016-10-26 13:09:53 +020031#include <linux/kmemleak.h>
Joerg Roedel54bd6352017-06-15 10:36:22 +020032#include <linux/crash_dump.h>
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020033#include <asm/pci-direct.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090034#include <asm/iommu.h>
Joerg Roedel1d9b16d2008-11-27 18:39:15 +010035#include <asm/gart.h>
FUJITA Tomonoriea1b0d32009-11-10 19:46:15 +090036#include <asm/x86_init.h>
Konrad Rzeszutek Wilk22e6daf2010-08-26 13:58:03 -040037#include <asm/iommu_table.h>
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +020038#include <asm/io_apic.h>
Joerg Roedel6b474b82012-06-26 16:46:04 +020039#include <asm/irq_remapping.h>
Joerg Roedel403f81d2011-06-14 16:44:25 +020040
41#include "amd_iommu_proto.h"
42#include "amd_iommu_types.h"
Joerg Roedel05152a02012-06-15 16:53:51 +020043#include "irq_remapping.h"
Joerg Roedel403f81d2011-06-14 16:44:25 +020044
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020045/*
46 * definitions for the ACPI scanning code
47 */
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020048#define IVRS_HEADER_LENGTH 48
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020049
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -040050#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020051#define ACPI_IVMD_TYPE_ALL 0x20
52#define ACPI_IVMD_TYPE 0x21
53#define ACPI_IVMD_TYPE_RANGE 0x22
54
55#define IVHD_DEV_ALL 0x01
56#define IVHD_DEV_SELECT 0x02
57#define IVHD_DEV_SELECT_RANGE_START 0x03
58#define IVHD_DEV_RANGE_END 0x04
59#define IVHD_DEV_ALIAS 0x42
60#define IVHD_DEV_ALIAS_RANGE 0x43
61#define IVHD_DEV_EXT_SELECT 0x46
62#define IVHD_DEV_EXT_SELECT_RANGE 0x47
Joerg Roedel6efed632012-06-14 15:52:58 +020063#define IVHD_DEV_SPECIAL 0x48
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -040064#define IVHD_DEV_ACPI_HID 0xf0
Joerg Roedel6efed632012-06-14 15:52:58 +020065
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -040066#define UID_NOT_PRESENT 0
67#define UID_IS_INTEGER 1
68#define UID_IS_CHARACTER 2
69
Joerg Roedel6efed632012-06-14 15:52:58 +020070#define IVHD_SPECIAL_IOAPIC 1
71#define IVHD_SPECIAL_HPET 2
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020072
Joerg Roedel6da73422009-05-04 11:44:38 +020073#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
74#define IVHD_FLAG_PASSPW_EN_MASK 0x02
75#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
76#define IVHD_FLAG_ISOC_EN_MASK 0x08
Joerg Roedelf6e2e6b2008-06-26 21:27:39 +020077
78#define IVMD_FLAG_EXCL_RANGE 0x08
79#define IVMD_FLAG_UNITY_MAP 0x01
80
81#define ACPI_DEVFLAG_INITPASS 0x01
82#define ACPI_DEVFLAG_EXTINT 0x02
83#define ACPI_DEVFLAG_NMI 0x04
84#define ACPI_DEVFLAG_SYSMGT1 0x10
85#define ACPI_DEVFLAG_SYSMGT2 0x20
86#define ACPI_DEVFLAG_LINT0 0x40
87#define ACPI_DEVFLAG_LINT1 0x80
88#define ACPI_DEVFLAG_ATSDIS 0x10000000
89
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -050090#define LOOP_TIMEOUT 100000
Joerg Roedelb65233a2008-07-11 17:14:21 +020091/*
92 * ACPI table definitions
93 *
94 * These data structures are laid over the table to parse the important values
95 * out of it.
96 */
97
Joerg Roedelb0119e82017-02-01 13:23:08 +010098extern const struct iommu_ops amd_iommu_ops;
99
Joerg Roedelb65233a2008-07-11 17:14:21 +0200100/*
101 * structure describing one IOMMU in the ACPI table. Typically followed by one
102 * or more ivhd_entrys.
103 */
struct ivhd_header {
	u8 type;	/* IVHD type: 0x10, 0x11 or 0x40 (see get_ivhd_header_size()) */
	u8 flags;
	u16 length;	/* total length of this IVHD including device entries */
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;	/* physical base address of the IOMMU MMIO space */
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));
119
Joerg Roedelb65233a2008-07-11 17:14:21 +0200120/*
121 * A device entry describing which devices a specific IOMMU translates and
122 * which requestor ids they use.
123 */
struct ivhd_entry {
	u8 type;	/* IVHD_DEV_* entry type; selects which fields below are valid */
	u16 devid;	/* PCI device id the entry refers to */
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;	/* UID format (UID_NOT_PRESENT/UID_IS_INTEGER/UID_IS_CHARACTER) -- presumably; confirm against spec */
	u8 uidl;	/* UID length; read at offset 21 by ivhd_entry_length() */
	u8 uid;		/* first byte of the variable-length UID data */
} __attribute__((packed));
135
Joerg Roedelb65233a2008-07-11 17:14:21 +0200136/*
137 * An AMD IOMMU memory definition structure. It defines things like exclusion
138 * ranges for devices and regions that should be unity mapped.
139 */
struct ivmd_header {
	u8 type;	/* ACPI_IVMD_TYPE* */
	u8 flags;	/* IVMD_FLAG_* (exclusion range / unity map) */
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;	/* start of the memory range */
	u64 range_length;	/* length of the memory range */
} __attribute__((packed));
150
Joerg Roedelfefda112009-05-20 12:21:42 +0200151bool amd_iommu_dump;
Joerg Roedel05152a02012-06-15 16:53:51 +0200152bool amd_iommu_irq_remap __read_mostly;
Joerg Roedelfefda112009-05-20 12:21:42 +0200153
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -0500154int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -0500155
Joerg Roedel02f3b3f2012-06-11 17:45:25 +0200156static bool amd_iommu_detected;
Joerg Roedela5235722010-05-11 17:12:33 +0200157static bool __initdata amd_iommu_disabled;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -0400158static int amd_iommu_target_ivhd_type;
Joerg Roedelc1cbebe2008-07-03 19:35:10 +0200159
Joerg Roedelb65233a2008-07-11 17:14:21 +0200160u16 amd_iommu_last_bdf; /* largest PCI device id we have
161 to handle */
Joerg Roedel2e228472008-07-11 17:14:31 +0200162LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
Joerg Roedelb65233a2008-07-11 17:14:21 +0200163 we find in ACPI */
Viresh Kumar621a5f72015-09-26 15:04:07 -0700164bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
Joerg Roedel928abd22008-06-26 21:27:40 +0200165
Joerg Roedel2e228472008-07-11 17:14:31 +0200166LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
Joerg Roedelb65233a2008-07-11 17:14:21 +0200167 system */
168
Joerg Roedelbb527772009-11-20 14:31:51 +0100169/* Array to assign indices to IOMMUs*/
170struct amd_iommu *amd_iommus[MAX_IOMMUS];
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -0600171
172/* Number of IOMMUs present in the system */
173static int amd_iommus_present;
Joerg Roedelbb527772009-11-20 14:31:51 +0100174
Joerg Roedel318afd42009-11-23 18:32:38 +0100175/* IOMMUs have a non-present cache? */
176bool amd_iommu_np_cache __read_mostly;
Joerg Roedel60f723b2011-04-05 12:50:24 +0200177bool amd_iommu_iotlb_sup __read_mostly = true;
Joerg Roedel318afd42009-11-23 18:32:38 +0100178
Suravee Suthikulpanita919a012014-03-05 18:54:18 -0600179u32 amd_iommu_max_pasid __read_mostly = ~0;
Joerg Roedel62f71ab2011-11-10 14:41:57 +0100180
Joerg Roedel400a28a2011-11-28 15:11:02 +0100181bool amd_iommu_v2_present __read_mostly;
Joerg Roedel4160cd92015-08-13 11:31:48 +0200182static bool amd_iommu_pc_present __read_mostly;
Joerg Roedel400a28a2011-11-28 15:11:02 +0100183
Joerg Roedel5abcdba2011-12-01 15:49:45 +0100184bool amd_iommu_force_isolation __read_mostly;
185
Joerg Roedelb65233a2008-07-11 17:14:21 +0200186/*
Joerg Roedelaeb26f52009-11-20 16:44:01 +0100187 * List of protection domains - used during resume
188 */
189LIST_HEAD(amd_iommu_pd_list);
190spinlock_t amd_iommu_pd_lock;
191
192/*
Joerg Roedelb65233a2008-07-11 17:14:21 +0200193 * Pointer to the device table which is shared by all AMD IOMMUs
194 * it is indexed by the PCI device id or the HT unit id and contains
195 * information about the domain the device belongs to as well as the
196 * page table root pointer.
197 */
Joerg Roedel928abd22008-06-26 21:27:40 +0200198struct dev_table_entry *amd_iommu_dev_table;
Joerg Roedelb65233a2008-07-11 17:14:21 +0200199
200/*
201 * The alias table is a driver specific data structure which contains the
202 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
203 * More than one device can share the same requestor id.
204 */
Joerg Roedel928abd22008-06-26 21:27:40 +0200205u16 *amd_iommu_alias_table;
Joerg Roedelb65233a2008-07-11 17:14:21 +0200206
207/*
208 * The rlookup table is used to find the IOMMU which is responsible
209 * for a specific device. It is also indexed by the PCI device id.
210 */
Joerg Roedel928abd22008-06-26 21:27:40 +0200211struct amd_iommu **amd_iommu_rlookup_table;
Joerg Roedelb65233a2008-07-11 17:14:21 +0200212
213/*
Joerg Roedel0ea2c422012-06-15 18:05:20 +0200214 * This table is used to find the irq remapping table for a given device id
215 * quickly.
216 */
217struct irq_remap_table **irq_lookup_table;
218
219/*
Frank Arnolddf805ab2012-08-27 19:21:04 +0200220 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
Joerg Roedelb65233a2008-07-11 17:14:21 +0200221 * to know which ones are already in use.
222 */
Joerg Roedel928abd22008-06-26 21:27:40 +0200223unsigned long *amd_iommu_pd_alloc_bitmap;
224
Joerg Roedelb65233a2008-07-11 17:14:21 +0200225static u32 dev_table_size; /* size of the device table */
226static u32 alias_table_size; /* size of the alias table */
227static u32 rlookup_table_size; /* size if the rlookup table */
Joerg Roedel3e8064b2008-06-26 21:27:41 +0200228
/*
 * States of the driver initialization state machine; the current state is
 * kept in init_state and advanced via iommu_go_to_state().
 */
enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,	/* terminal: no IOMMU in the system */
	IOMMU_INIT_ERROR,	/* terminal: initialization failed */
	IOMMU_CMDLINE_DISABLED,	/* terminal: disabled on the kernel command line */
};
242
Joerg Roedel235dacb2013-04-09 17:53:14 +0200243/* Early ioapic and hpet maps from kernel command line */
244#define EARLY_MAP_SIZE 4
245static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
246static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -0400247static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
248
Joerg Roedel235dacb2013-04-09 17:53:14 +0200249static int __initdata early_ioapic_map_size;
250static int __initdata early_hpet_map_size;
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -0400251static int __initdata early_acpihid_map_size;
252
Joerg Roedeldfbb6d42013-04-09 19:06:18 +0200253static bool __initdata cmdline_maps;
Joerg Roedel235dacb2013-04-09 17:53:14 +0200254
Joerg Roedel2c0ae172012-06-12 15:59:30 +0200255static enum iommu_init_state init_state = IOMMU_START_STATE;
256
Gerard Snitselaarae295142012-03-16 11:38:22 -0700257static int amd_iommu_enable_interrupts(void);
Joerg Roedel2c0ae172012-06-12 15:59:30 +0200258static int __init iommu_go_to_state(enum iommu_init_state state);
Joerg Roedelaafd8ba2015-05-28 18:41:39 +0200259static void init_device_table_dma(void);
Joerg Roedel3d9761e2012-03-15 16:39:21 +0100260
Baoquan He4c232a72017-08-09 16:33:33 +0800261bool translation_pre_enabled(struct amd_iommu *iommu)
262{
263 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
264}
265
266static void clear_translation_pre_enabled(struct amd_iommu *iommu)
267{
268 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
269}
270
271static void init_translation_status(struct amd_iommu *iommu)
272{
273 u32 ctrl;
274
275 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
276 if (ctrl & (1<<CONTROL_IOMMU_EN))
277 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
278}
279
Joerg Roedel208ec8c2008-07-11 17:14:24 +0200280static inline void update_last_devid(u16 devid)
281{
282 if (devid > amd_iommu_last_bdf)
283 amd_iommu_last_bdf = devid;
284}
285
Joerg Roedelc5714842008-07-11 17:14:25 +0200286static inline unsigned long tbl_size(int entry_size)
287{
288 unsigned shift = PAGE_SHIFT +
Neil Turton421f9092009-05-14 14:00:35 +0100289 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
Joerg Roedelc5714842008-07-11 17:14:25 +0200290
291 return 1UL << shift;
292}
293
/* Return the number of IOMMUs discovered in the system. */
int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}
298
Matthew Garrett5bcd7572010-10-04 14:59:31 -0400299/* Access to l1 and l2 indexed register spaces */
300
/*
 * Read from the L1 indexed register space: program the index register
 * at PCI config offset 0xf8 (L1 bank in bits 16+, register address in
 * the low bits), then read the value through the data window at 0xfc.
 */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}
309
/*
 * Write to the L1 indexed register space: select the target with the
 * write-enable bit (1 << 31) set, write the value through 0xfc, then
 * reprogram the index with write-enable cleared.
 */
static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}
316
/*
 * Read from the L2 indexed register space: index register at PCI config
 * offset 0xf0, data window at 0xf4.
 */
static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}
325
/*
 * Write to the L2 indexed register space; bit 8 of the index register
 * is the write-enable bit.
 */
static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
331
Joerg Roedelb65233a2008-07-11 17:14:21 +0200332/****************************************************************************
333 *
334 * AMD IOMMU MMIO register space handling functions
335 *
336 * These functions are used to program the IOMMU device registers in
337 * MMIO space required for that driver.
338 *
339 ****************************************************************************/
340
341/*
342 * This function set the exclusion range in the IOMMU. DMA accesses to the
343 * exclusion range are passed through untranslated
344 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	/* An exclusion start of zero means no exclusion range configured */
	if (!iommu->exclusion_start)
		return;

	/* The enable bit lives in the base register together with the address */
	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}
362
Joerg Roedelb65233a2008-07-11 17:14:21 +0200363/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	/* low bits encode the table size in 4K pages, minus one */
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}
375
Joerg Roedelb65233a2008-07-11 17:14:21 +0200376/* Generic functions to enable/disable certain features of the IOMMU. */
Joerg Roedel05f92db2009-05-12 09:52:46 +0200377static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
Joerg Roedelb2026aa2008-06-26 21:27:44 +0200378{
379 u32 ctrl;
380
381 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
382 ctrl |= (1 << bit);
383 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
384}
385
Joerg Roedelca0207112009-10-28 18:02:26 +0100386static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
Joerg Roedelb2026aa2008-06-26 21:27:44 +0200387{
388 u32 ctrl;
389
Joerg Roedel199d0d52008-09-17 16:45:59 +0200390 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
Joerg Roedelb2026aa2008-06-26 21:27:44 +0200391 ctrl &= ~(1 << bit);
392 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
393}
394
Joerg Roedel1456e9d2011-12-22 14:51:53 +0100395static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
396{
397 u32 ctrl;
398
399 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
400 ctrl &= ~CTRL_INV_TO_MASK;
401 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
402 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
403}
404
Joerg Roedelb65233a2008-07-11 17:14:21 +0200405/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	/* Turn on translation; buffers/logs must already be programmed */
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}
410
/*
 * Quiesce the IOMMU: stop command fetching, event logging and GA logging
 * before turning the translation hardware itself off (last).
 */
static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
427
Joerg Roedelb65233a2008-07-11 17:14:21 +0200428/*
429 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
430 * the system has one.
431 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	/*
	 * NOTE(review): despite its name, 'end' is used as the region *size*
	 * (second argument of request_mem_region()/ioremap_nocache()), while
	 * the error message below prints it as if it were an end address.
	 * Confirm against the callers before relying on the logged range.
	 */
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}
443
/* Undo iommu_map_mmio_space(): unmap and release the MMIO region. */
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
450
Suravee Suthikulpanitac7ccf62016-04-01 09:05:58 -0400451static inline u32 get_ivhd_header_size(struct ivhd_header *h)
452{
453 u32 size = 0;
454
455 switch (h->type) {
456 case 0x10:
457 size = 24;
458 break;
459 case 0x11:
460 case 0x40:
461 size = 40;
462 break;
463 }
464 return size;
465}
466
Joerg Roedelb65233a2008-07-11 17:14:21 +0200467/****************************************************************************
468 *
469 * The functions below belong to the first pass of AMD IOMMU ACPI table
470 * parsing. In this pass we try to find out the highest device id this
471 * code has to handle. Upon this information the size of the shared data
472 * structures is determined later.
473 *
474 ****************************************************************************/
475
476/*
Joerg Roedelb514e552008-09-17 17:14:27 +0200477 * This function calculates the length of a given IVHD entry
478 */
479static inline int ivhd_entry_length(u8 *ivhd)
480{
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -0400481 u32 type = ((struct ivhd_entry *)ivhd)->type;
482
483 if (type < 0x80) {
484 return 0x04 << (*ivhd >> 6);
485 } else if (type == IVHD_DEV_ACPI_HID) {
486 /* For ACPI_HID, offset 21 is uid len */
487 return *((u8 *)ivhd + 21) + 22;
488 }
489 return 0;
Joerg Roedelb514e552008-09-17 17:14:27 +0200490}
491
492/*
Joerg Roedelb65233a2008-07-11 17:14:21 +0200493 * After reading the highest device id from the IOMMU PCI capability header
494 * this function looks if there is a higher device id defined in the ACPI table
495 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	/* skip the fixed header, stop at the end of this IVHD block */
	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		/* entries are variable length; advance by each entry's own size */
		p += ivhd_entry_length(p);
	}

	/* a well-formed IVHD ends exactly on its length boundary */
	WARN_ON(p != end);

	return 0;
}
535
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -0400536static int __init check_ivrs_checksum(struct acpi_table_header *table)
537{
538 int i;
539 u8 checksum = 0, *p = (u8 *)table;
540
541 for (i = 0; i < table->length; ++i)
542 checksum += p[i];
543 if (checksum != 0) {
544 /* ACPI table corrupt */
545 pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
546 return -ENODEV;
547 }
548
549 return 0;
550}
551
Joerg Roedelb65233a2008-07-11 17:14:21 +0200552/*
553 * Iterate over all IVHD entries in the ACPI table and find the highest device
554 * id which we need to handle. This is the first of three functions which parse
555 * the ACPI table. So we check the checksum here.
556 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/* IVHD blocks start after the fixed IVRS header */
	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		/* only scan IVHD blocks of the type selected for this system */
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
579
Joerg Roedelb65233a2008-07-11 17:14:21 +0200580/****************************************************************************
581 *
Frank Arnolddf805ab2012-08-27 19:21:04 +0200582 * The following functions belong to the code path which parses the ACPI table
Joerg Roedelb65233a2008-07-11 17:14:21 +0200583 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
584 * data structures, initialize the device/alias/rlookup table and also
585 * basically initialize the hardware.
586 *
587 ****************************************************************************/
588
589/*
590 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
591 * write commands to that buffer later and the IOMMU will execute them
592 * asynchronously
593 */
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200594static int __init alloc_command_buffer(struct amd_iommu *iommu)
Joerg Roedelb36ca912008-06-26 21:27:45 +0200595{
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200596 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
597 get_order(CMD_BUFFER_SIZE));
Joerg Roedelb36ca912008-06-26 21:27:45 +0200598
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200599 return iommu->cmd_buf ? 0 : -ENOMEM;
Joerg Roedel58492e12009-05-04 18:41:16 +0200600}
601
602/*
Joerg Roedel93f1cc672009-09-03 14:50:20 +0200603 * This function resets the command buffer if the IOMMU stopped fetching
604 * commands from it.
605 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	/* command fetching must be off while the ring pointers are reset */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	/* keep the software copies of the ring pointers in sync */
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
617
618/*
Joerg Roedel58492e12009-05-04 18:41:16 +0200619 * This function writes the command buffer address to the hardware and
620 * enables it.
621 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	/* base register takes physical address plus the size encoding */
	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* resets head/tail and re-enables command fetching */
	amd_iommu_reset_cmd_buffer(iommu);
}
636
Baoquan He78d313c2017-08-09 16:33:34 +0800637/*
638 * This function disables the command buffer
639 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	/* stop the hardware from fetching commands */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}
644
/* Free the pages allocated by alloc_command_buffer(). */
static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
649
Joerg Roedel335503e2008-09-05 14:29:07 +0200650/* allocates the memory where the IOMMU will log its events to */
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200651static int __init alloc_event_buffer(struct amd_iommu *iommu)
Joerg Roedel335503e2008-09-05 14:29:07 +0200652{
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200653 iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
654 get_order(EVT_BUFFER_SIZE));
Joerg Roedel335503e2008-09-05 14:29:07 +0200655
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200656 return iommu->evt_buf ? 0 : -ENOMEM;
Joerg Roedel58492e12009-05-04 18:41:16 +0200657}
658
/* Program the event log base address/size and enable event logging. */
static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}
676
Baoquan He78d313c2017-08-09 16:33:34 +0800677/*
678 * This function disables the event log buffer
679 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	/* stop the hardware from writing to the event log */
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}
684
/* Free the pages allocated by alloc_event_buffer(). */
static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
689
Joerg Roedel1a29ac02011-11-10 15:41:40 +0100690/* allocates the memory where the IOMMU will log its events to */
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200691static int __init alloc_ppr_log(struct amd_iommu *iommu)
Joerg Roedel1a29ac02011-11-10 15:41:40 +0100692{
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200693 iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
694 get_order(PPR_LOG_SIZE));
Joerg Roedel1a29ac02011-11-10 15:41:40 +0100695
Joerg Roedelf2c2db52015-10-20 17:33:42 +0200696 return iommu->ppr_log ? 0 : -ENOMEM;
Joerg Roedel1a29ac02011-11-10 15:41:40 +0100697}
698
/* Program the PPR log base/size and enable PPR logging, if allocated. */
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	/* PPR log is optional; nothing to do if it was not allocated */
	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}
718
719static void __init free_ppr_log(struct amd_iommu *iommu)
720{
721 if (iommu->ppr_log == NULL)
722 return;
723
724 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
725}
726
/* Free the GA (guest APIC) log and its tail page; no-op without IRQ remapping. */
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (iommu->ga_log)
		free_pages((unsigned long)iommu->ga_log,
			    get_order(GA_LOG_SIZE));
	if (iommu->ga_log_tail)
		free_pages((unsigned long)iommu->ga_log_tail,
			    get_order(8));
#endif
}
738
/*
 * Enable GA logging and poll the status register until the hardware
 * reports the GA log as running, or LOOP_TIMEOUT iterations elapse.
 * Returns 0 on success (or when IRQ remapping is compiled out),
 * -EINVAL on missing log or timeout.
 */
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	/* busy-wait for the hardware to acknowledge */
	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}
767
#ifdef CONFIG_IRQ_REMAP
/*
 * Allocate the GA (guest APIC) log and its tail-pointer buffer and program
 * their addresses into the IOMMU. Only needed in vAPIC mode; returns 0 on
 * success (or when vAPIC is disabled), -EINVAL on allocation failure.
 */
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	/* Tail pointer is 8 bytes, but allocation granularity is a page */
	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	/*
	 * Fix: the GA-log tail register must be programmed with the
	 * physical address of the tail buffer, not of the log itself.
	 */
	entry = ((u64)virt_to_phys(iommu->ga_log_tail) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */
801
static int iommu_init_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * GASup was already validated against the IVRS table; vAPIC mode
	 * additionally requires GAMSup, otherwise fall back to legacy GA.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	return iommu_init_ga_log(iommu);
#else
	return 0;
#endif /* CONFIG_IRQ_REMAP */
}
819
Joerg Roedelcbc33a92011-11-25 11:41:31 +0100820static void iommu_enable_gt(struct amd_iommu *iommu)
821{
822 if (!iommu_feature(iommu, FEATURE_GT))
823 return;
824
825 iommu_feature_enable(iommu, CONTROL_GT_EN);
826}
827
Joerg Roedelb65233a2008-07-11 17:14:21 +0200828/* sets a specific bit in the device table entry. */
Joerg Roedel3566b772008-06-26 21:27:46 +0200829static void set_dev_entry_bit(u16 devid, u8 bit)
830{
Joerg Roedelee6c2862011-11-09 12:06:03 +0100831 int i = (bit >> 6) & 0x03;
832 int _bit = bit & 0x3f;
Joerg Roedel3566b772008-06-26 21:27:46 +0200833
Joerg Roedelee6c2862011-11-09 12:06:03 +0100834 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
Joerg Roedel3566b772008-06-26 21:27:46 +0200835}
836
Joerg Roedelc5cca142009-10-09 18:31:20 +0200837static int get_dev_entry_bit(u16 devid, u8 bit)
838{
Joerg Roedelee6c2862011-11-09 12:06:03 +0100839 int i = (bit >> 6) & 0x03;
840 int _bit = bit & 0x3f;
Joerg Roedelc5cca142009-10-09 18:31:20 +0200841
Joerg Roedelee6c2862011-11-09 12:06:03 +0100842 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
Joerg Roedelc5cca142009-10-09 18:31:20 +0200843}
844
845
846void amd_iommu_apply_erratum_63(u16 devid)
847{
848 int sysmgt;
849
850 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
851 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
852
853 if (sysmgt == 0x01)
854 set_dev_entry_bit(devid, DEV_ENTRY_IW);
855}
856
Joerg Roedel5ff47892008-07-14 20:11:18 +0200857/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	/* Record which IOMMU is responsible for this device id. */
	amd_iommu_rlookup_table[devid] = iommu;
}
862
Joerg Roedelb65233a2008-07-11 17:14:21 +0200863/*
864 * This function takes the device specific flags read from the ACPI
865 * table and sets up the device table entry with that information
866 */
Joerg Roedel5ff47892008-07-14 20:11:18 +0200867static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
868 u16 devid, u32 flags, u32 ext_flags)
Joerg Roedel3566b772008-06-26 21:27:46 +0200869{
870 if (flags & ACPI_DEVFLAG_INITPASS)
871 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
872 if (flags & ACPI_DEVFLAG_EXTINT)
873 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
874 if (flags & ACPI_DEVFLAG_NMI)
875 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
876 if (flags & ACPI_DEVFLAG_SYSMGT1)
877 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
878 if (flags & ACPI_DEVFLAG_SYSMGT2)
879 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
880 if (flags & ACPI_DEVFLAG_LINT0)
881 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
882 if (flags & ACPI_DEVFLAG_LINT1)
883 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
Joerg Roedel3566b772008-06-26 21:27:46 +0200884
Joerg Roedelc5cca142009-10-09 18:31:20 +0200885 amd_iommu_apply_erratum_63(devid);
886
Joerg Roedel5ff47892008-07-14 20:11:18 +0200887 set_iommu_for_device(iommu, devid);
Joerg Roedel3566b772008-06-26 21:27:46 +0200888}
889
Joerg Roedelc50e3242014-09-09 15:59:37 +0200890static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
Joerg Roedel6efed632012-06-14 15:52:58 +0200891{
892 struct devid_map *entry;
893 struct list_head *list;
894
Joerg Roedel31cff672013-04-09 16:53:58 +0200895 if (type == IVHD_SPECIAL_IOAPIC)
896 list = &ioapic_map;
897 else if (type == IVHD_SPECIAL_HPET)
898 list = &hpet_map;
899 else
Joerg Roedel6efed632012-06-14 15:52:58 +0200900 return -EINVAL;
901
Joerg Roedel31cff672013-04-09 16:53:58 +0200902 list_for_each_entry(entry, list, list) {
903 if (!(entry->id == id && entry->cmd_line))
904 continue;
905
906 pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
907 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
908
Joerg Roedelc50e3242014-09-09 15:59:37 +0200909 *devid = entry->devid;
910
Joerg Roedel31cff672013-04-09 16:53:58 +0200911 return 0;
912 }
913
Joerg Roedel6efed632012-06-14 15:52:58 +0200914 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
915 if (!entry)
916 return -ENOMEM;
917
Joerg Roedel31cff672013-04-09 16:53:58 +0200918 entry->id = id;
Joerg Roedelc50e3242014-09-09 15:59:37 +0200919 entry->devid = *devid;
Joerg Roedel31cff672013-04-09 16:53:58 +0200920 entry->cmd_line = cmd_line;
Joerg Roedel6efed632012-06-14 15:52:58 +0200921
922 list_add_tail(&entry->list, list);
923
924 return 0;
925}
926
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -0400927static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
928 bool cmd_line)
929{
930 struct acpihid_map_entry *entry;
931 struct list_head *list = &acpihid_map;
932
933 list_for_each_entry(entry, list, list) {
934 if (strcmp(entry->hid, hid) ||
935 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
936 !entry->cmd_line)
937 continue;
938
939 pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
940 hid, uid);
941 *devid = entry->devid;
942 return 0;
943 }
944
945 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
946 if (!entry)
947 return -ENOMEM;
948
949 memcpy(entry->uid, uid, strlen(uid));
950 memcpy(entry->hid, hid, strlen(hid));
951 entry->devid = *devid;
952 entry->cmd_line = cmd_line;
953 entry->root_devid = (entry->devid & (~0x7));
954
955 pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
956 entry->cmd_line ? "cmd" : "ivrs",
957 entry->hid, entry->uid, entry->root_devid);
958
959 list_add_tail(&entry->list, list);
960 return 0;
961}
962
Joerg Roedel235dacb2013-04-09 17:53:14 +0200963static int __init add_early_maps(void)
964{
965 int i, ret;
966
967 for (i = 0; i < early_ioapic_map_size; ++i) {
968 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
969 early_ioapic_map[i].id,
Joerg Roedelc50e3242014-09-09 15:59:37 +0200970 &early_ioapic_map[i].devid,
Joerg Roedel235dacb2013-04-09 17:53:14 +0200971 early_ioapic_map[i].cmd_line);
972 if (ret)
973 return ret;
974 }
975
976 for (i = 0; i < early_hpet_map_size; ++i) {
977 ret = add_special_device(IVHD_SPECIAL_HPET,
978 early_hpet_map[i].id,
Joerg Roedelc50e3242014-09-09 15:59:37 +0200979 &early_hpet_map[i].devid,
Joerg Roedel235dacb2013-04-09 17:53:14 +0200980 early_hpet_map[i].cmd_line);
981 if (ret)
982 return ret;
983 }
984
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -0400985 for (i = 0; i < early_acpihid_map_size; ++i) {
986 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
987 early_acpihid_map[i].uid,
988 &early_acpihid_map[i].devid,
989 early_acpihid_map[i].cmd_line);
990 if (ret)
991 return ret;
992 }
993
Joerg Roedel235dacb2013-04-09 17:53:14 +0200994 return 0;
995}
996
Joerg Roedelb65233a2008-07-11 17:14:21 +0200997/*
Frank Arnolddf805ab2012-08-27 19:21:04 +0200998 * Reads the device exclusion range from ACPI and initializes the IOMMU with
Joerg Roedelb65233a2008-07-11 17:14:21 +0200999 * it
1000 */
Joerg Roedel3566b772008-06-26 21:27:46 +02001001static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
1002{
1003 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1004
1005 if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
1006 return;
1007
1008 if (iommu) {
Joerg Roedelb65233a2008-07-11 17:14:21 +02001009 /*
1010 * We only can configure exclusion ranges per IOMMU, not
1011 * per device. But we can enable the exclusion range per
1012 * device. This is done here
1013 */
Su Friendy2c16c9f2014-05-07 13:54:52 +08001014 set_dev_entry_bit(devid, DEV_ENTRY_EX);
Joerg Roedel3566b772008-06-26 21:27:46 +02001015 iommu->exclusion_start = m->range_start;
1016 iommu->exclusion_length = m->range_length;
1017 }
1018}
1019
Joerg Roedelb65233a2008-07-11 17:14:21 +02001020/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02001021 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1022 * initializes the hardware and our data structures with it.
1023 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	/* Range state: devid_start/devid_to/flags/ext_flags/alias are set by
	 * *_RANGE_START/ALIAS_RANGE entries and consumed by DEV_RANGE_END. */
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;


	/* Apply IOAPIC/HPET/ACPI-HID overrides from the kernel command line */
	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;


	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			/* Same flags for every device id behind this IOMMU */
			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			/* Remember range start; applied at DEV_RANGE_END */
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			/* Requests from devid appear with devid_to on the bus */
			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			/* Apply the pending range state to every id in range */
			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			/* IOAPIC or HPET entry; fields are packed into e->ext */
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN] = {0};
			u8 uid[ACPIHID_UID_LEN] = {0};
			int ret;

			/* ACPI HID entries are only valid in type-0x40 IVHDs */
			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			/* UID may be absent, an integer, or a string */
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
				uid[ACPIHID_UID_LEN - 1] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		/* Entry length depends on the entry type */
		p += ivhd_entry_length(p);
	}

	return 0;
}
1287
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	/* Release all per-IOMMU buffers, then unmap the MMIO window. */
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}
1296
static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	/* Tear down and free every IOMMU registered on amd_iommu_list. */
	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
1307
Joerg Roedelb65233a2008-07-11 17:14:21 +02001308/*
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001309 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1310 * Workaround:
1311 * BIOS should disable L2B micellaneous clock gating by setting
1312 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1313 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	/* Erratum 746 applies only to family 15h, models 10h-1fh */
	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	/* Select NB indirect register 0x90 and read its current value */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	/* BIOS already set CKGateL2BMiscDisable - nothing to do */
	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
1339
1340/*
Jay Cornwall358875f2016-02-10 15:48:01 -06001341 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1342 * Workaround:
1343 * BIOS should enable ATS write permission check by setting
1344 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1345 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	/* The ATS-write erratum applies only to family 15h, models 30h-3fh */
	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	/* BIOS already enabled the ATS write permission check - done */
	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));
}
1367
1368/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02001369 * This function clues the initialization function for one IOMMU
1370 * together and also allocates the command buffer and programs the
1371 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1372 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	/* The MMIO window size and GA support depend on the IVHD type */
	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		/* Without GASup fall back to legacy interrupt remapping */
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	/* Detect whether firmware left translation enabled (kdump case) */
	init_translation_status(iommu);

	if (translation_pre_enabled(iommu))
		pr_warn("Translation is already enabled - trying to copy translation structures\n");

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}
1458
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001459/**
1460 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1461 * @ivrs Pointer to the IVRS header
1462 *
1463 * This function search through all IVDB of the maximum supported IVHD
1464 */
1465static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1466{
1467 u8 *base = (u8 *)ivrs;
1468 struct ivhd_header *ivhd = (struct ivhd_header *)
1469 (base + IVRS_HEADER_LENGTH);
1470 u8 last_type = ivhd->type;
1471 u16 devid = ivhd->devid;
1472
1473 while (((u8 *)ivhd - base < ivrs->length) &&
1474 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1475 u8 *p = (u8 *) ivhd;
1476
1477 if (ivhd->devid == devid)
1478 last_type = ivhd->type;
1479 ivhd = (struct ivhd_header *)(p + ivhd->length);
1480 }
1481
1482 return last_type;
1483}
1484
Joerg Roedelb65233a2008-07-11 17:14:21 +02001485/*
1486 * Iterates over all IOMMU entries in the ACPI table, allocates the
1487 * IOMMU structure and initializes it with init_iommu_one()
1488 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	/* Walk all IVHD blocks; only the selected target type is consumed */
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			/* Freed via free_iommu_all() on any later failure */
			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}
1526
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001527static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1528 u8 fxn, u64 *value, bool is_write);
Steven L Kinney30861dd2013-06-05 16:11:48 -05001529
1530static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1531{
1532 u64 val = 0xabcd, val2 = 0;
1533
1534 if (!iommu_feature(iommu, FEATURE_PC))
1535 return;
1536
1537 amd_iommu_pc_present = true;
1538
1539 /* Check if the performance counters can be written to */
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001540 if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1541 (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
Steven L Kinney30861dd2013-06-05 16:11:48 -05001542 (val != val2)) {
1543 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
1544 amd_iommu_pc_present = false;
1545 return;
1546 }
1547
1548 pr_info("AMD-Vi: IOMMU performance counters supported\n");
1549
1550 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1551 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1552 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1553}
1554
/* sysfs show handler: capability header cached in iommu->cap at init time */
static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1563
/* sysfs show handler: 64-bit extended feature register (iommu->features) */
static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1572
/* Attributes exported through the iommu device's "amd-iommu" sysfs group */
static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
Steven L Kinney30861dd2013-06-05 16:11:48 -05001588
/*
 * Attach the amd_iommu instance to its PCI function and read all
 * capability/feature state needed by later init stages: the PCI
 * capability header, the MMIO extended feature register, PASID/GLX
 * limits, PPR log, GA mode, RD890 resume state, and finally the
 * sysfs / iommu-core registration.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;
	int ret;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	/*
	 * NOTE(review): 'range' and 'misc' are read here but never used
	 * below - these look like leftover dead reads; confirm and remove.
	 */
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	/* Without IOTLB support on any IOMMU, ATS cannot be used globally */
	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		/*
		 * Decode the PASID width from the feature register and
		 * clamp the global maximum to the weakest IOMMU.
		 */
		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		/* Same clamping for the number of guest CR3 levels (GLX) */
		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	/* IOMMUv2 needs both guest translation and peripheral page requests */
	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
				PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				&iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				&iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		/* Snapshot the L1 and L2 indirect registers for resume */
		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	/* Register with the iommu core and expose the sysfs group */
	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}
1696
/*
 * Log one line per IOMMU (device name + capability offset), decode the
 * low extended-feature bits when the EFR capability flag is set, and
 * report the global interrupt remapping / virtual APIC state.
 */
static void print_iommu_info(void)
{
	/* Names for the low extended-feature bits; index == bit number */
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features (%#llx):\n",
				iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			/* GAM_VAPIC lives above the named bits - handled extra */
			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("AMD-Vi: virtual APIC enabled\n");
	}
}
1731
Joerg Roedel2c0ae172012-06-12 15:59:30 +02001732static int __init amd_iommu_init_pci(void)
Joerg Roedel23c742d2012-06-12 11:47:34 +02001733{
1734 struct amd_iommu *iommu;
1735 int ret = 0;
1736
1737 for_each_iommu(iommu) {
1738 ret = iommu_init_pci(iommu);
1739 if (ret)
1740 break;
1741 }
1742
Joerg Roedel522e5cb72016-07-01 16:42:55 +02001743 /*
1744 * Order is important here to make sure any unity map requirements are
1745 * fulfilled. The unity mappings are created and written to the device
1746 * table during the amd_iommu_init_api() call.
1747 *
1748 * After that we call init_device_table_dma() to make sure any
1749 * uninitialized DTE will block DMA, and in the end we flush the caches
1750 * of all IOMMUs to make sure the changes to the device table are
1751 * active.
1752 */
1753 ret = amd_iommu_init_api();
1754
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001755 init_device_table_dma();
Joerg Roedel23c742d2012-06-12 11:47:34 +02001756
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001757 for_each_iommu(iommu)
1758 iommu_flush_all_caches(iommu);
1759
Joerg Roedel3a18404c2015-05-28 18:41:45 +02001760 if (!ret)
1761 print_iommu_info();
Joerg Roedel4d121c32012-06-14 12:21:55 +02001762
Joerg Roedel23c742d2012-06-12 11:47:34 +02001763 return ret;
1764}
1765
Joerg Roedelb65233a2008-07-11 17:14:21 +02001766/****************************************************************************
1767 *
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001768 * The following functions initialize the MSI interrupts for all IOMMUs
Frank Arnolddf805ab2012-08-27 19:21:04 +02001769 * in the system. It's a bit challenging because there could be multiple
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001770 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1771 * pci_dev.
1772 *
1773 ****************************************************************************/
1774
Joerg Roedel9f800de2009-11-23 12:45:25 +01001775static int iommu_setup_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001776{
1777 int r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001778
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001779 r = pci_enable_msi(iommu->dev);
1780 if (r)
1781 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001782
Joerg Roedel72fe00f2011-05-10 10:50:42 +02001783 r = request_threaded_irq(iommu->dev->irq,
1784 amd_iommu_int_handler,
1785 amd_iommu_int_thread,
1786 0, "AMD-Vi",
Suravee Suthikulpanit3f398bc2013-04-22 16:32:34 -05001787 iommu);
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001788
1789 if (r) {
1790 pci_disable_msi(iommu->dev);
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001791 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001792 }
1793
Joerg Roedelfab6afa2009-05-04 18:46:34 +02001794 iommu->int_enabled = true;
Joerg Roedel1a29ac02011-11-10 15:41:40 +01001795
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001796 return 0;
1797}
1798
Joerg Roedel05f92db2009-05-12 09:52:46 +02001799static int iommu_init_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001800{
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001801 int ret;
1802
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001803 if (iommu->int_enabled)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001804 goto enable_faults;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001805
Yijing Wang82fcfc62013-08-08 21:12:36 +08001806 if (iommu->dev->msi_cap)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001807 ret = iommu_setup_msi(iommu);
1808 else
1809 ret = -ENODEV;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001810
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001811 if (ret)
1812 return ret;
1813
1814enable_faults:
1815 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
1816
1817 if (iommu->ppr_log != NULL)
1818 iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
1819
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001820 iommu_ga_log_enable(iommu);
1821
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001822 return 0;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001823}
1824
1825/****************************************************************************
1826 *
Joerg Roedelb65233a2008-07-11 17:14:21 +02001827 * The next functions belong to the third pass of parsing the ACPI
1828 * table. In this last pass the memory mapping requirements are
Frank Arnolddf805ab2012-08-27 19:21:04 +02001829 * gathered (like exclusion and unity mapping ranges).
Joerg Roedelb65233a2008-07-11 17:14:21 +02001830 *
1831 ****************************************************************************/
1832
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001833static void __init free_unity_maps(void)
1834{
1835 struct unity_map_entry *entry, *next;
1836
1837 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
1838 list_del(&entry->list);
1839 kfree(entry);
1840 }
1841}
1842
Joerg Roedelb65233a2008-07-11 17:14:21 +02001843/* called when we find an exclusion range definition in ACPI */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001844static int __init init_exclusion_range(struct ivmd_header *m)
1845{
1846 int i;
1847
1848 switch (m->type) {
1849 case ACPI_IVMD_TYPE:
1850 set_device_exclusion_range(m->devid, m);
1851 break;
1852 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel3a61ec32008-07-25 13:07:50 +02001853 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001854 set_device_exclusion_range(i, m);
1855 break;
1856 case ACPI_IVMD_TYPE_RANGE:
1857 for (i = m->devid; i <= m->aux; ++i)
1858 set_device_exclusion_range(i, m);
1859 break;
1860 default:
1861 break;
1862 }
1863
1864 return 0;
1865}
1866
Joerg Roedelb65233a2008-07-11 17:14:21 +02001867/* called for unity map ACPI definition */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001868static int __init init_unity_map_range(struct ivmd_header *m)
1869{
Joerg Roedel98f1ad22012-07-06 13:28:37 +02001870 struct unity_map_entry *e = NULL;
Joerg Roedel02acc432009-05-20 16:24:21 +02001871 char *s;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001872
1873 e = kzalloc(sizeof(*e), GFP_KERNEL);
1874 if (e == NULL)
1875 return -ENOMEM;
1876
1877 switch (m->type) {
1878 default:
Joerg Roedel0bc252f2009-05-22 12:48:05 +02001879 kfree(e);
1880 return 0;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001881 case ACPI_IVMD_TYPE:
Joerg Roedel02acc432009-05-20 16:24:21 +02001882 s = "IVMD_TYPEi\t\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001883 e->devid_start = e->devid_end = m->devid;
1884 break;
1885 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel02acc432009-05-20 16:24:21 +02001886 s = "IVMD_TYPE_ALL\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001887 e->devid_start = 0;
1888 e->devid_end = amd_iommu_last_bdf;
1889 break;
1890 case ACPI_IVMD_TYPE_RANGE:
Joerg Roedel02acc432009-05-20 16:24:21 +02001891 s = "IVMD_TYPE_RANGE\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001892 e->devid_start = m->devid;
1893 e->devid_end = m->aux;
1894 break;
1895 }
1896 e->address_start = PAGE_ALIGN(m->range_start);
1897 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
1898 e->prot = m->flags >> 1;
1899
Joerg Roedel02acc432009-05-20 16:24:21 +02001900 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
1901 " range_start: %016llx range_end: %016llx flags: %x\n", s,
Shuah Khanc5081cd2013-02-27 17:07:19 -07001902 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
1903 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
Joerg Roedel02acc432009-05-20 16:24:21 +02001904 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
1905 e->address_start, e->address_end, m->flags);
1906
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001907 list_add_tail(&e->list, &amd_iommu_unity_map);
1908
1909 return 0;
1910}
1911
Joerg Roedelb65233a2008-07-11 17:14:21 +02001912/* iterates over all memory definitions we find in the ACPI table */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001913static int __init init_memory_definitions(struct acpi_table_header *table)
1914{
1915 u8 *p = (u8 *)table, *end = (u8 *)table;
1916 struct ivmd_header *m;
1917
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001918 end += table->length;
1919 p += IVRS_HEADER_LENGTH;
1920
1921 while (p < end) {
1922 m = (struct ivmd_header *)p;
1923 if (m->flags & IVMD_FLAG_EXCL_RANGE)
1924 init_exclusion_range(m);
1925 else if (m->flags & IVMD_FLAG_UNITY_MAP)
1926 init_unity_map_range(m);
1927
1928 p += m->length;
1929 }
1930
1931 return 0;
1932}
1933
Joerg Roedelb65233a2008-07-11 17:14:21 +02001934/*
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02001935 * Init the device table to not allow DMA access for devices and
1936 * suppress all page faults
1937 */
Joerg Roedel33f28c52012-06-15 18:03:31 +02001938static void init_device_table_dma(void)
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02001939{
Joerg Roedel0de66d52011-06-06 16:04:02 +02001940 u32 devid;
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02001941
1942 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1943 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
1944 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
Joerg Roedel54bd6352017-06-15 10:36:22 +02001945 /*
1946 * In kdump kernels in-flight DMA from the old kernel might
1947 * cause IO_PAGE_FAULTs. There are no reports that a kdump
1948 * actually failed because of that, so just disable fault
1949 * reporting in the hardware to get rid of the messages
1950 */
1951 if (is_kdump_kernel())
1952 set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02001953 }
1954}
1955
/*
 * Clear the first two quadwords of every device table entry, undoing
 * the DMA-blocking setup done in init_device_table_dma().
 */
static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}
1965
/*
 * When interrupt remapping is in use, mark every device table entry
 * with IRQ_TBL_EN so device interrupts go through an IRQ table.
 * No-op when remapping is disabled.
 */
static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}
1976
Joerg Roedele9bf5192010-09-20 14:33:07 +02001977static void iommu_init_flags(struct amd_iommu *iommu)
1978{
1979 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1980 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1981 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1982
1983 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1984 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1985 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1986
1987 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1988 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1989 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1990
1991 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1992 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1993 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1994
1995 /*
1996 * make IOMMU memory accesses cache coherent
1997 */
1998 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
Joerg Roedel1456e9d2011-12-22 14:51:53 +01001999
2000 /* Set IOTLB invalidation timeout to 1s */
2001 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
Joerg Roedele9bf5192010-09-20 14:33:07 +02002002}
2003
/*
 * Re-program RD890 IOMMUs after resume. BIOSes on these systems may not
 * fully reconfigure the IOMMU, so the northbridge enable bit, the
 * config-space BAR and the L1/L2 indirect register state captured in
 * iommu_init_pci() are written back here. No-op on other hardware or
 * when the root PCI device was not found.
 */
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}
2046
/*
 * Select the guest-interrupt (GA) mode for this IOMMU and install the
 * matching IRTE format ops: 128-bit IRTEs for vAPIC/legacy-GA modes,
 * 32-bit legacy IRTEs otherwise. Only relevant with CONFIG_IRQ_REMAP.
 */
static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		/* Fall through */
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		/* GA_EN is needed for both vAPIC and legacy GA mode */
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}
2064
/*
 * Bring a single IOMMU from unknown/BIOS state into operation: disable
 * it first, then program flags, device table, command/event buffers,
 * exclusion range and GA mode, and only then enable it and flush all
 * its caches. The disable-first/program/enable order is deliberate.
 */
static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}
2077
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002078/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02002079 * This function finally enables all IOMMUs found in the system after
2080 * they have been initialized
2081 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		early_enable_iommu(iommu);

#ifdef CONFIG_IRQ_REMAP
	/* Advertise IRQ posting support when virtual APIC mode is active */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}
2094
/* Enable the IOMMUv2-related hardware features (PPR log, GT) on all IOMMUs */
static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}
2104
/* Full enable path: basic (early) enable on all IOMMUs, then the v2 extras */
static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}
2111
/*
 * Disable all IOMMUs (used on suspend and before re-initialization)
 * and withdraw the IRQ posting capability when vAPIC mode was active.
 */
static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}
2124
/*
 * Suspend/Resume support: the IOMMUs are disabled on suspend and fully
 * re-programmed and re-enabled on resume via syscore ops.
 */
2129
/*
 * syscore resume hook: re-apply the RD890 quirks, re-program and
 * re-enable all IOMMUs, then re-enable their interrupts.
 */
static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}
2142
/* syscore suspend hook; always succeeds */
static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}
2150
/* Hook IOMMU disable/re-enable into the system core suspend/resume path */
static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
2155
/*
 * Tear down everything the early init phase allocated. Each global
 * pointer is NULLed after freeing so a repeated cleanup cannot
 * double-free. Called on init failure.
 */
static void __init free_iommu_resources(void)
{
	/* irq_lookup_table is (presumably) kmemleak-annotated at alloc time;
	 * release that tracking before freeing the pages - TODO confirm */
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}
2189
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002190/* SB IOAPIC is always on this device in AMD systems */
2191#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2192
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002193static bool __init check_ioapic_information(void)
2194{
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002195 const char *fw_bug = FW_BUG;
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002196 bool ret, has_sb_ioapic;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002197 int idx;
2198
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002199 has_sb_ioapic = false;
2200 ret = false;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002201
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002202 /*
2203 * If we have map overrides on the kernel command line the
2204 * messages in this function might not describe firmware bugs
2205 * anymore - so be careful
2206 */
2207 if (cmdline_maps)
2208 fw_bug = "";
2209
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002210 for (idx = 0; idx < nr_ioapics; idx++) {
2211 int devid, id = mpc_ioapic_id(idx);
2212
2213 devid = get_ioapic_devid(id);
2214 if (devid < 0) {
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002215 pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
2216 fw_bug, id);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002217 ret = false;
2218 } else if (devid == IOAPIC_SB_DEVID) {
2219 has_sb_ioapic = true;
2220 ret = true;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002221 }
2222 }
2223
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002224 if (!has_sb_ioapic) {
2225 /*
2226 * We expect the SB IOAPIC to be listed in the IVRS
2227 * table. The system timer is connected to the SB IOAPIC
2228 * and if we don't have it in the list the system will
2229 * panic at boot time. This situation usually happens
2230 * when the BIOS is buggy and provides us the wrong
2231 * device id for the IOAPIC in the system.
2232 */
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002233 pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002234 }
2235
2236 if (!ret)
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002237 pr_err("AMD-Vi: Disabling interrupt remapping\n");
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002238
2239 return ret;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002240}
2241
/* Free the protection-domain allocation bitmap and all unity map entries */
static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	/* NULL the pointer so a repeated cleanup cannot double-free */
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}
2250
Joerg Roedelb65233a2008-07-11 17:14:21 +02002251/*
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002252 * This is the hardware init function for AMD IOMMU in the system.
2253 * This function is called either from amd_iommu_init or from the interrupt
2254 * remapping setup code.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002255 *
2256 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002257 * four times:
Joerg Roedelb65233a2008-07-11 17:14:21 +02002258 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002259 * 1 pass) Discover the most comprehensive IVHD type to use.
2260 *
2261 * 2 pass) Find the highest PCI device id the driver has to handle.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002262 * Upon this information the size of the data structures is
2263 * determined that needs to be allocated.
2264 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002265 * 3 pass) Initialize the data structures just allocated with the
Joerg Roedelb65233a2008-07-11 17:14:21 +02002266 * information in the ACPI table about available AMD IOMMUs
2267 * in the system. It also maps the PCI devices in the
2268 * system to specific IOMMUs
2269 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002270 * 4 pass) After the basic data structures are allocated and
Joerg Roedelb65233a2008-07-11 17:14:21 +02002271 * initialized we update them with information about memory
2272 * remapping requirements parsed out of the ACPI table in
2273 * this last pass.
2274 *
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002275 * After everything is set up the IOMMUs are enabled and the necessary
2276 * hotplug and suspend notifiers are registered.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002277 */
Joerg Roedel643511b2012-06-12 12:09:35 +02002278static int __init early_amd_iommu_init(void)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002279{
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002280 struct acpi_table_header *ivrs_base;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002281 acpi_status status;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002282 int i, remap_cache_sz, ret = 0;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002283
Joerg Roedel643511b2012-06-12 12:09:35 +02002284 if (!amd_iommu_detected)
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002285 return -ENODEV;
2286
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002287 status = acpi_get_table("IVRS", 0, &ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002288 if (status == AE_NOT_FOUND)
2289 return -ENODEV;
2290 else if (ACPI_FAILURE(status)) {
2291 const char *err = acpi_format_exception(status);
2292 pr_err("AMD-Vi: IVRS table error: %s\n", err);
2293 return -EINVAL;
2294 }
2295
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002296 /*
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002297 * Validate checksum here so we don't need to do it when
2298 * we actually parse the table
2299 */
2300 ret = check_ivrs_checksum(ivrs_base);
2301 if (ret)
Rafael J. Wysocki99e8ccd2017-01-10 14:57:28 +01002302 goto out;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002303
2304 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2305 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2306
2307 /*
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002308 * First parse ACPI tables to find the largest Bus/Dev/Func
2309 * we need to handle. Upon this information the shared data
2310 * structures for the IOMMUs in the system will be allocated
2311 */
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002312 ret = find_last_devid_acpi(ivrs_base);
2313 if (ret)
Joerg Roedel3551a702010-03-01 13:52:19 +01002314 goto out;
2315
Joerg Roedelc5714842008-07-11 17:14:25 +02002316 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2317 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2318 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002319
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002320 /* Device table - directly used by all IOMMUs */
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002321 ret = -ENOMEM;
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002322 amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002323 get_order(dev_table_size));
2324 if (amd_iommu_dev_table == NULL)
2325 goto out;
2326
2327 /*
2328 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2329 * IOMMU see for that device
2330 */
2331 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2332 get_order(alias_table_size));
2333 if (amd_iommu_alias_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002334 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002335
2336 /* IOMMU rlookup table - find the IOMMU for a specific device */
Joerg Roedel83fd5cc2008-12-16 19:17:11 +01002337 amd_iommu_rlookup_table = (void *)__get_free_pages(
2338 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002339 get_order(rlookup_table_size));
2340 if (amd_iommu_rlookup_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002341 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002342
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002343 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2344 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002345 get_order(MAX_DOMAIN_ID/8));
2346 if (amd_iommu_pd_alloc_bitmap == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002347 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002348
2349 /*
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002350 * let all alias entries point to itself
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002351 */
Joerg Roedel3a61ec32008-07-25 13:07:50 +02002352 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002353 amd_iommu_alias_table[i] = i;
2354
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002355 /*
2356 * never allocate domain 0 because its used as the non-allocated and
2357 * error value placeholder
2358 */
Baoquan He5c87f622016-09-15 16:50:51 +08002359 __set_bit(0, amd_iommu_pd_alloc_bitmap);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002360
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002361 spin_lock_init(&amd_iommu_pd_lock);
2362
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002363 /*
2364 * now the data structures are allocated and basically initialized
2365 * start the real acpi table scan
2366 */
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002367 ret = init_iommu_all(ivrs_base);
2368 if (ret)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002369 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002370
Joerg Roedel11123742017-06-16 16:09:54 +02002371 /* Disable any previously enabled IOMMUs */
2372 disable_iommus();
2373
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002374 if (amd_iommu_irq_remap)
2375 amd_iommu_irq_remap = check_ioapic_information();
2376
Joerg Roedel05152a02012-06-15 16:53:51 +02002377 if (amd_iommu_irq_remap) {
2378 /*
2379 * Interrupt remapping enabled, create kmem_cache for the
2380 * remapping tables.
2381 */
Wei Yongjun83ed9c12013-04-23 10:47:44 +08002382 ret = -ENOMEM;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002383 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2384 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2385 else
2386 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
Joerg Roedel05152a02012-06-15 16:53:51 +02002387 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002388 remap_cache_sz,
2389 IRQ_TABLE_ALIGNMENT,
2390 0, NULL);
Joerg Roedel05152a02012-06-15 16:53:51 +02002391 if (!amd_iommu_irq_cache)
2392 goto out;
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002393
2394 irq_lookup_table = (void *)__get_free_pages(
2395 GFP_KERNEL | __GFP_ZERO,
2396 get_order(rlookup_table_size));
Lucas Stachebcfa282016-10-26 13:09:53 +02002397 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2398 1, GFP_KERNEL);
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002399 if (!irq_lookup_table)
2400 goto out;
Joerg Roedel05152a02012-06-15 16:53:51 +02002401 }
2402
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002403 ret = init_memory_definitions(ivrs_base);
2404 if (ret)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002405 goto out;
Joerg Roedel3551a702010-03-01 13:52:19 +01002406
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002407 /* init the device table */
2408 init_device_table();
2409
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002410out:
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002411 /* Don't leak any ACPI memory */
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002412 acpi_put_table(ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002413 ivrs_base = NULL;
2414
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002415 return ret;
Joerg Roedel643511b2012-06-12 12:09:35 +02002416}
2417
Gerard Snitselaarae295142012-03-16 11:38:22 -07002418static int amd_iommu_enable_interrupts(void)
Joerg Roedel3d9761e2012-03-15 16:39:21 +01002419{
2420 struct amd_iommu *iommu;
2421 int ret = 0;
2422
2423 for_each_iommu(iommu) {
2424 ret = iommu_init_msi(iommu);
2425 if (ret)
2426 goto out;
2427 }
2428
2429out:
2430 return ret;
2431}
2432
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002433static bool detect_ivrs(void)
2434{
2435 struct acpi_table_header *ivrs_base;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002436 acpi_status status;
2437
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002438 status = acpi_get_table("IVRS", 0, &ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002439 if (status == AE_NOT_FOUND)
2440 return false;
2441 else if (ACPI_FAILURE(status)) {
2442 const char *err = acpi_format_exception(status);
2443 pr_err("AMD-Vi: IVRS table error: %s\n", err);
2444 return false;
2445 }
2446
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002447 acpi_put_table(ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002448
Joerg Roedel1adb7d32012-08-06 14:18:42 +02002449 /* Make sure ACS will be enabled during PCI probe */
2450 pci_request_acs();
2451
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002452 return true;
2453}
2454
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002455/****************************************************************************
2456 *
2457 * AMD IOMMU Initialization State Machine
2458 *
2459 ****************************************************************************/
2460
/*
 * Advance the IOMMU initialization state machine by exactly one step.
 *
 * On success init_state moves to the next regular state; a failing
 * step moves it into a terminal error state instead. Returns 0 on
 * success or a negative error code.
 */
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		/* Look for an IVRS ACPI table first */
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		/* "amd_iommu=off" on the command line: undo early init */
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n");
			free_dma_resources();
			free_iommu_resources();
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		/* Make sure the IOMMUs get disabled again on shutdown */
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		/* Register suspend/resume hooks before PCI init */
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}
2523
2524static int __init iommu_go_to_state(enum iommu_init_state state)
2525{
Joerg Roedel151b0902017-06-16 16:09:57 +02002526 int ret = -EINVAL;
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002527
2528 while (init_state != state) {
Joerg Roedel1b1e9422017-06-16 16:09:56 +02002529 if (init_state == IOMMU_NOT_FOUND ||
2530 init_state == IOMMU_INIT_ERROR ||
2531 init_state == IOMMU_CMDLINE_DISABLED)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002532 break;
Joerg Roedel151b0902017-06-16 16:09:57 +02002533 ret = state_next();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002534 }
2535
2536 return ret;
2537}
2538
Joerg Roedel6b474b82012-06-26 16:46:04 +02002539#ifdef CONFIG_IRQ_REMAP
2540int __init amd_iommu_prepare(void)
2541{
Thomas Gleixner3f4cb7c2015-01-23 14:32:46 +01002542 int ret;
2543
Jiang Liu7fa1c842015-01-07 15:31:42 +08002544 amd_iommu_irq_remap = true;
Joerg Roedel84d07792015-01-07 15:31:39 +08002545
Thomas Gleixner3f4cb7c2015-01-23 14:32:46 +01002546 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2547 if (ret)
2548 return ret;
2549 return amd_iommu_irq_remap ? 0 : -ENODEV;
Joerg Roedel6b474b82012-06-26 16:46:04 +02002550}
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002551
Joerg Roedel6b474b82012-06-26 16:46:04 +02002552int __init amd_iommu_enable(void)
2553{
2554 int ret;
2555
2556 ret = iommu_go_to_state(IOMMU_ENABLED);
2557 if (ret)
2558 return ret;
2559
2560 irq_remapping_enabled = 1;
2561
2562 return 0;
2563}
2564
/* Disable the IOMMUs by reusing the suspend path. */
void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}
2569
/* Re-enable the IOMMUs by reusing the resume path; @mode is ignored. */
int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}
2576
/* Nothing to set up here - see amd_iommu_enable_interrupts(). */
int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
2582#endif
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002583
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002584/*
2585 * This is the core init function for AMD IOMMU hardware in the system.
2586 * This function is called from the generic x86 DMA layer initialization
2587 * code.
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002588 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		/* Initialization failed - undo whatever was set up so far */
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			/*
			 * Interrupt remapping is in use, so the IOMMUs are
			 * left running; only tear down the DMA-related
			 * device table state and flush the IOMMU caches.
			 */
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}
2610
Joerg Roedelb65233a2008-07-11 17:14:21 +02002611/****************************************************************************
2612 *
2613 * Early detect code. This code runs at IOMMU detection time in the DMA
2614 * layer. It just looks if there is an IVRS ACPI table to detect AMD
2615 * IOMMUs
2616 *
2617 ****************************************************************************/
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -04002618int __init amd_iommu_detect(void)
Joerg Roedelae7877d2008-06-26 21:27:51 +02002619{
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002620 int ret;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002621
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09002622 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -04002623 return -ENODEV;
Joerg Roedelae7877d2008-06-26 21:27:51 +02002624
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002625 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2626 if (ret)
2627 return ret;
Linus Torvalds11bd04f2009-12-11 12:18:16 -08002628
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002629 amd_iommu_detected = true;
2630 iommu_detected = 1;
2631 x86_init.iommu.iommu_init = amd_iommu_init;
2632
Jérôme Glisse4781bc42015-08-31 18:13:03 -04002633 return 1;
Joerg Roedelae7877d2008-06-26 21:27:51 +02002634}
2635
Joerg Roedelb65233a2008-07-11 17:14:21 +02002636/****************************************************************************
2637 *
2638 * Parsing functions for the AMD IOMMU specific kernel command line
2639 * options.
2640 *
2641 ****************************************************************************/
2642
Joerg Roedelfefda112009-05-20 12:21:42 +02002643static int __init parse_amd_iommu_dump(char *str)
2644{
2645 amd_iommu_dump = true;
2646
2647 return 1;
2648}
2649
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002650static int __init parse_amd_iommu_intr(char *str)
2651{
2652 for (; *str; ++str) {
2653 if (strncmp(str, "legacy", 6) == 0) {
2654 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2655 break;
2656 }
2657 if (strncmp(str, "vapic", 5) == 0) {
2658 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2659 break;
2660 }
2661 }
2662 return 1;
2663}
2664
Joerg Roedel918ad6c2008-06-26 21:27:52 +02002665static int __init parse_amd_iommu_options(char *str)
2666{
2667 for (; *str; ++str) {
Joerg Roedel695b5672008-11-17 15:16:43 +01002668 if (strncmp(str, "fullflush", 9) == 0)
FUJITA Tomonoriafa9fdc2008-09-20 01:23:30 +09002669 amd_iommu_unmap_flush = true;
Joerg Roedela5235722010-05-11 17:12:33 +02002670 if (strncmp(str, "off", 3) == 0)
2671 amd_iommu_disabled = true;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002672 if (strncmp(str, "force_isolation", 15) == 0)
2673 amd_iommu_force_isolation = true;
Joerg Roedel918ad6c2008-06-26 21:27:52 +02002674 }
2675
2676 return 1;
2677}
2678
Joerg Roedel440e89982013-04-09 16:35:28 +02002679static int __init parse_ivrs_ioapic(char *str)
2680{
2681 unsigned int bus, dev, fn;
2682 int ret, id, i;
2683 u16 devid;
2684
2685 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2686
2687 if (ret != 4) {
2688 pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
2689 return 1;
2690 }
2691
2692 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2693 pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2694 str);
2695 return 1;
2696 }
2697
2698 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2699
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002700 cmdline_maps = true;
Joerg Roedel440e89982013-04-09 16:35:28 +02002701 i = early_ioapic_map_size++;
2702 early_ioapic_map[i].id = id;
2703 early_ioapic_map[i].devid = devid;
2704 early_ioapic_map[i].cmd_line = true;
2705
2706 return 1;
2707}
2708
2709static int __init parse_ivrs_hpet(char *str)
2710{
2711 unsigned int bus, dev, fn;
2712 int ret, id, i;
2713 u16 devid;
2714
2715 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2716
2717 if (ret != 4) {
2718 pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
2719 return 1;
2720 }
2721
2722 if (early_hpet_map_size == EARLY_MAP_SIZE) {
2723 pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
2724 str);
2725 return 1;
2726 }
2727
2728 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2729
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002730 cmdline_maps = true;
Joerg Roedel440e89982013-04-09 16:35:28 +02002731 i = early_hpet_map_size++;
2732 early_hpet_map[i].id = id;
2733 early_hpet_map[i].devid = devid;
2734 early_hpet_map[i].cmd_line = true;
2735
2736 return 1;
2737}
2738
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04002739static int __init parse_ivrs_acpihid(char *str)
2740{
2741 u32 bus, dev, fn;
2742 char *hid, *uid, *p;
2743 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
2744 int ret, i;
2745
2746 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
2747 if (ret != 4) {
2748 pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
2749 return 1;
2750 }
2751
2752 p = acpiid;
2753 hid = strsep(&p, ":");
2754 uid = p;
2755
2756 if (!hid || !(*hid) || !uid) {
2757 pr_err("AMD-Vi: Invalid command line: hid or uid\n");
2758 return 1;
2759 }
2760
2761 i = early_acpihid_map_size++;
2762 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
2763 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
2764 early_acpihid_map[i].devid =
2765 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2766 early_acpihid_map[i].cmd_line = true;
2767
2768 return 1;
2769}
2770
Joerg Roedel440e89982013-04-09 16:35:28 +02002771__setup("amd_iommu_dump", parse_amd_iommu_dump);
2772__setup("amd_iommu=", parse_amd_iommu_options);
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002773__setup("amd_iommu_intr=", parse_amd_iommu_intr);
Joerg Roedel440e89982013-04-09 16:35:28 +02002774__setup("ivrs_ioapic", parse_ivrs_ioapic);
2775__setup("ivrs_hpet", parse_ivrs_hpet);
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04002776__setup("ivrs_acpihid", parse_ivrs_acpihid);
Konrad Rzeszutek Wilk22e6daf2010-08-26 13:58:03 -04002777
2778IOMMU_INIT_FINISH(amd_iommu_detect,
2779 gart_iommu_hole_init,
Joerg Roedel98f1ad22012-07-06 13:28:37 +02002780 NULL,
2781 NULL);
Joerg Roedel400a28a2011-11-28 15:11:02 +01002782
/* Report the amd_iommu_v2_present flag to users of the v2 API. */
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
Steven L Kinney30861dd2013-06-05 16:11:48 -05002788
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002789struct amd_iommu *get_amd_iommu(unsigned int idx)
2790{
2791 unsigned int i = 0;
2792 struct amd_iommu *iommu;
2793
2794 for_each_iommu(iommu)
2795 if (i++ == idx)
2796 return iommu;
2797 return NULL;
2798}
2799EXPORT_SYMBOL(get_amd_iommu);
2800
Steven L Kinney30861dd2013-06-05 16:11:48 -05002801/****************************************************************************
2802 *
2803 * IOMMU EFR Performance Counter support functionality. This code allows
2804 * access to the IOMMU PC functionality.
2805 *
2806 ****************************************************************************/
2807
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002808u8 amd_iommu_pc_get_max_banks(unsigned int idx)
Steven L Kinney30861dd2013-06-05 16:11:48 -05002809{
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002810 struct amd_iommu *iommu = get_amd_iommu(idx);
Steven L Kinney30861dd2013-06-05 16:11:48 -05002811
Steven L Kinney30861dd2013-06-05 16:11:48 -05002812 if (iommu)
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002813 return iommu->max_banks;
Steven L Kinney30861dd2013-06-05 16:11:48 -05002814
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002815 return 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05002816}
2817EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
2818
/* Report the amd_iommu_pc_present flag (performance counter support). */
bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
2824
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002825u8 amd_iommu_pc_get_max_counters(unsigned int idx)
Steven L Kinney30861dd2013-06-05 16:11:48 -05002826{
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002827 struct amd_iommu *iommu = get_amd_iommu(idx);
Steven L Kinney30861dd2013-06-05 16:11:48 -05002828
Steven L Kinney30861dd2013-06-05 16:11:48 -05002829 if (iommu)
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002830 return iommu->max_counters;
Steven L Kinney30861dd2013-06-05 16:11:48 -05002831
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002832 return 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05002833}
2834EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2835
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06002836static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
2837 u8 fxn, u64 *value, bool is_write)
Steven L Kinney30861dd2013-06-05 16:11:48 -05002838{
Steven L Kinney30861dd2013-06-05 16:11:48 -05002839 u32 offset;
2840 u32 max_offset_lim;
2841
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06002842 /* Make sure the IOMMU PC resource is available */
2843 if (!amd_iommu_pc_present)
2844 return -ENODEV;
2845
Steven L Kinney30861dd2013-06-05 16:11:48 -05002846 /* Check for valid iommu and pc register indexing */
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06002847 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
Steven L Kinney30861dd2013-06-05 16:11:48 -05002848 return -ENODEV;
2849
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06002850 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
Steven L Kinney30861dd2013-06-05 16:11:48 -05002851
2852 /* Limit the offset to the hw defined mmio region aperture */
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06002853 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
Steven L Kinney30861dd2013-06-05 16:11:48 -05002854 (iommu->max_counters << 8) | 0x28);
2855 if ((offset < MMIO_CNTR_REG_OFFSET) ||
2856 (offset > max_offset_lim))
2857 return -EINVAL;
2858
2859 if (is_write) {
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06002860 u64 val = *value & GENMASK_ULL(47, 0);
2861
2862 writel((u32)val, iommu->mmio_base + offset);
2863 writel((val >> 32), iommu->mmio_base + offset + 4);
Steven L Kinney30861dd2013-06-05 16:11:48 -05002864 } else {
2865 *value = readl(iommu->mmio_base + offset + 4);
2866 *value <<= 32;
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06002867 *value |= readl(iommu->mmio_base + offset);
2868 *value &= GENMASK_ULL(47, 0);
Steven L Kinney30861dd2013-06-05 16:11:48 -05002869 }
2870
2871 return 0;
2872}
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01002873
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06002874int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01002875{
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06002876 if (!iommu)
2877 return -EINVAL;
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01002878
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06002879 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01002880}
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06002881EXPORT_SYMBOL(amd_iommu_pc_get_reg);
2882
/* Write a performance counter register; see iommu_pc_get_set_reg(). */
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);