// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>
#include "amd_iommu.h"
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_IW			0x04
#define IVMD_FLAG_IR			0x02
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

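/*
 * Upper bound on polling iterations (not a time value) used below when
 * waiting for IOMMU state changes, e.g. for the GA log to start running.
 */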
#define LOOP_TIMEOUT	100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

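/*
 * Check whether a previous kernel (e.g. the one a kdump kernel was booted
 * from) left this IOMMU with translation enabled and remember that in
 * iommu->flags, so the old device table can be preserved later on.
 */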
static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

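/*
 * Size of a per-device-id table, rounded up to a whole power-of-two number
 * of pages. For example, with amd_iommu_last_bdf == 0xffff and a 32-byte
 * entry size (the device table case) this evaluates to 2 MiB.
 */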
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */
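/*
 * These register spaces are not memory mapped. They are reached through an
 * address/data window in PCI config space: offsets 0xf8/0xfc select and
 * access an L1 register (bit 31 of the address word arms a write), and
 * offsets 0xf0/0xf4 do the same for L2 registers (bit 8 arms the write).
 */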

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
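/*
 * For entry types below 0x80 the length is encoded in the two topmost bits
 * of the type byte: 0 means 4 bytes, 1 means 8 bytes. Variable-sized
 * ACPI_HID entries carry their UID length at offset 21 on top of a fixed
 * 22-byte part.
 */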
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

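/*
 * The IVRS table is only considered valid if all of its bytes sum to zero
 * (the standard ACPI table checksum).
 */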
static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests (PPR) to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (iommu->ga_log)
		free_pages((unsigned long)iommu->ga_log,
			   get_order(GA_LOG_SIZE));
	if (iommu->ga_log_tail)
		free_pages((unsigned long)iommu->ga_log_tail,
			   get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 * Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
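/*
 * A device table entry is 256 bits wide and stored as four 64-bit words in
 * data[]. Bits 7:6 of the bit index select the word and bits 5:0 the bit
 * within it; e.g. bit index 0x61 lands in data[1] at bit position 0x21.
 */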
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}


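/*
 * If the previous kernel left translation enabled (the kdump case), copy the
 * old device table so ongoing DMA and interrupt remapping keep working:
 * valid DTEs and their domain ids are preserved, stale gcr3 table pointers
 * are masked out, and interrupt remapping settings are only taken over when
 * they look sane. Returns false whenever the old table cannot be trusted.
 */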
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
				iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
				iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask(sme_me_mask), we must remove the memory
	 * encryption mask to obtain the true physical address in kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
				get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

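/*
 * Record an IOAPIC or HPET id -> PCI device id mapping found in the IVRS
 * table or given on the kernel command line. A command-line override takes
 * precedence: in that case *devid is rewritten instead of adding a new
 * entry.
 */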
int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->devid = *devid;
	entry->cmd_line = cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line = cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

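/*
 * Register the IOAPIC/HPET/ACPI-HID overrides collected from the kernel
 * command line before the IVRS device entries are parsed, so that the
 * command-line mappings take precedence over the firmware-provided ones.
 */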
Joerg Roedel235dacb2013-04-09 17:53:14 +02001081static int __init add_early_maps(void)
1082{
1083 int i, ret;
1084
1085 for (i = 0; i < early_ioapic_map_size; ++i) {
1086 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1087 early_ioapic_map[i].id,
Joerg Roedelc50e3242014-09-09 15:59:37 +02001088 &early_ioapic_map[i].devid,
Joerg Roedel235dacb2013-04-09 17:53:14 +02001089 early_ioapic_map[i].cmd_line);
1090 if (ret)
1091 return ret;
1092 }
1093
1094 for (i = 0; i < early_hpet_map_size; ++i) {
1095 ret = add_special_device(IVHD_SPECIAL_HPET,
1096 early_hpet_map[i].id,
Joerg Roedelc50e3242014-09-09 15:59:37 +02001097 &early_hpet_map[i].devid,
Joerg Roedel235dacb2013-04-09 17:53:14 +02001098 early_hpet_map[i].cmd_line);
1099 if (ret)
1100 return ret;
1101 }
1102
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001103 for (i = 0; i < early_acpihid_map_size; ++i) {
1104 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1105 early_acpihid_map[i].uid,
1106 &early_acpihid_map[i].devid,
1107 early_acpihid_map[i].cmd_line);
1108 if (ret)
1109 return ret;
1110 }
1111
Joerg Roedel235dacb2013-04-09 17:53:14 +02001112 return 0;
1113}
1114
Joerg Roedelb65233a2008-07-11 17:14:21 +02001115/*
Frank Arnolddf805ab2012-08-27 19:21:04 +02001116 * Reads the device exclusion range from ACPI and initializes the IOMMU with
Joerg Roedelb65233a2008-07-11 17:14:21 +02001117 * it
1118 */
Joerg Roedel3566b772008-06-26 21:27:46 +02001119static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
1120{
Joerg Roedel3566b772008-06-26 21:27:46 +02001121 if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
1122 return;
1123
Adrian Huang387caf02019-11-14 14:14:47 +08001124 /*
1125 * Treat per-device exclusion ranges as r/w unity-mapped regions
1126 * since some buggy BIOSes might lead to the overwritten exclusion
1127 * range (exclusion_start and exclusion_length members). This
1128 * happens when there are multiple exclusion ranges (IVMD entries)
1129 * defined in ACPI table.
1130 */
1131 m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
Joerg Roedel3566b772008-06-26 21:27:46 +02001132}
1133
Joerg Roedelb65233a2008-07-11 17:14:21 +02001134/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02001135 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1136 * initializes the hardware and our data structures with it.
1137 */
Joerg Roedel6efed632012-06-14 15:52:58 +02001138static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001139 struct ivhd_header *h)
1140{
1141 u8 *p = (u8 *)h;
1142 u8 *end = p, flags = 0;
Joerg Roedel0de66d52011-06-06 16:04:02 +02001143 u16 devid = 0, devid_start = 0, devid_to = 0;
1144 u32 dev_i, ext_flags = 0;
Joerg Roedel58a3bee2008-07-11 17:14:30 +02001145 bool alias = false;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001146 struct ivhd_entry *e;
Suravee Suthikulpanitac7ccf62016-04-01 09:05:58 -04001147 u32 ivhd_size;
Joerg Roedel235dacb2013-04-09 17:53:14 +02001148 int ret;
1149
1150
1151 ret = add_early_maps();
1152 if (ret)
1153 return ret;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001154
Kai-Heng Feng93d05152019-08-21 13:10:04 +08001155 amd_iommu_apply_ivrs_quirks();
1156
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001157 /*
Joerg Roedele9bf5192010-09-20 14:33:07 +02001158 * First save the recommended feature enable bits from ACPI
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001159 */
Joerg Roedele9bf5192010-09-20 14:33:07 +02001160 iommu->acpi_flags = h->flags;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001161
1162 /*
1163 * Done. Now parse the device entries
1164 */
Suravee Suthikulpanitac7ccf62016-04-01 09:05:58 -04001165 ivhd_size = get_ivhd_header_size(h);
1166 if (!ivhd_size) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001167 pr_err("Unsupported IVHD type %#x\n", h->type);
Suravee Suthikulpanitac7ccf62016-04-01 09:05:58 -04001168 return -EINVAL;
1169 }
1170
1171 p += ivhd_size;
1172
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001173 end += h->length;
1174
Joerg Roedel42a698f2009-05-20 15:41:28 +02001175
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001176 while (p < end) {
1177 e = (struct ivhd_entry *)p;
1178 switch (e->type) {
1179 case IVHD_DEV_ALL:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001180
Joerg Roedel226e8892015-10-20 17:33:44 +02001181 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
Joerg Roedel42a698f2009-05-20 15:41:28 +02001182
Joerg Roedel226e8892015-10-20 17:33:44 +02001183 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1184 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001185 break;
1186 case IVHD_DEV_SELECT:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001187
1188 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1189 "flags: %02x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001190 PCI_BUS_NUM(e->devid),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001191 PCI_SLOT(e->devid),
1192 PCI_FUNC(e->devid),
1193 e->flags);
1194
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001195 devid = e->devid;
Joerg Roedel5ff47892008-07-14 20:11:18 +02001196 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001197 break;
1198 case IVHD_DEV_SELECT_RANGE_START:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001199
1200 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1201 "devid: %02x:%02x.%x flags: %02x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001202 PCI_BUS_NUM(e->devid),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001203 PCI_SLOT(e->devid),
1204 PCI_FUNC(e->devid),
1205 e->flags);
1206
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001207 devid_start = e->devid;
1208 flags = e->flags;
1209 ext_flags = 0;
Joerg Roedel58a3bee2008-07-11 17:14:30 +02001210 alias = false;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001211 break;
1212 case IVHD_DEV_ALIAS:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001213
1214 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1215 "flags: %02x devid_to: %02x:%02x.%x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001216 PCI_BUS_NUM(e->devid),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001217 PCI_SLOT(e->devid),
1218 PCI_FUNC(e->devid),
1219 e->flags,
Shuah Khanc5081cd2013-02-27 17:07:19 -07001220 PCI_BUS_NUM(e->ext >> 8),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001221 PCI_SLOT(e->ext >> 8),
1222 PCI_FUNC(e->ext >> 8));
1223
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001224 devid = e->devid;
1225 devid_to = e->ext >> 8;
Joerg Roedel7a6a3a02009-07-02 12:23:23 +02001226 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
Neil Turton7455aab2009-05-14 14:08:11 +01001227 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001228 amd_iommu_alias_table[devid] = devid_to;
1229 break;
1230 case IVHD_DEV_ALIAS_RANGE:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001231
1232 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1233 "devid: %02x:%02x.%x flags: %02x "
1234 "devid_to: %02x:%02x.%x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001235 PCI_BUS_NUM(e->devid),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001236 PCI_SLOT(e->devid),
1237 PCI_FUNC(e->devid),
1238 e->flags,
Shuah Khanc5081cd2013-02-27 17:07:19 -07001239 PCI_BUS_NUM(e->ext >> 8),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001240 PCI_SLOT(e->ext >> 8),
1241 PCI_FUNC(e->ext >> 8));
1242
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001243 devid_start = e->devid;
1244 flags = e->flags;
1245 devid_to = e->ext >> 8;
1246 ext_flags = 0;
Joerg Roedel58a3bee2008-07-11 17:14:30 +02001247 alias = true;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001248 break;
1249 case IVHD_DEV_EXT_SELECT:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001250
1251 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1252 "flags: %02x ext: %08x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001253 PCI_BUS_NUM(e->devid),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001254 PCI_SLOT(e->devid),
1255 PCI_FUNC(e->devid),
1256 e->flags, e->ext);
1257
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001258 devid = e->devid;
Joerg Roedel5ff47892008-07-14 20:11:18 +02001259 set_dev_entry_from_acpi(iommu, devid, e->flags,
1260 e->ext);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001261 break;
1262 case IVHD_DEV_EXT_SELECT_RANGE:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001263
1264 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1265 "%02x:%02x.%x flags: %02x ext: %08x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001266 PCI_BUS_NUM(e->devid),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001267 PCI_SLOT(e->devid),
1268 PCI_FUNC(e->devid),
1269 e->flags, e->ext);
1270
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001271 devid_start = e->devid;
1272 flags = e->flags;
1273 ext_flags = e->ext;
Joerg Roedel58a3bee2008-07-11 17:14:30 +02001274 alias = false;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001275 break;
1276 case IVHD_DEV_RANGE_END:
Joerg Roedel42a698f2009-05-20 15:41:28 +02001277
1278 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001279 PCI_BUS_NUM(e->devid),
Joerg Roedel42a698f2009-05-20 15:41:28 +02001280 PCI_SLOT(e->devid),
1281 PCI_FUNC(e->devid));
1282
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001283 devid = e->devid;
1284 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
Joerg Roedel7a6a3a02009-07-02 12:23:23 +02001285 if (alias) {
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001286 amd_iommu_alias_table[dev_i] = devid_to;
Joerg Roedel7a6a3a02009-07-02 12:23:23 +02001287 set_dev_entry_from_acpi(iommu,
1288 devid_to, flags, ext_flags);
1289 }
1290 set_dev_entry_from_acpi(iommu, dev_i,
1291 flags, ext_flags);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001292 }
1293 break;
Joerg Roedel6efed632012-06-14 15:52:58 +02001294 case IVHD_DEV_SPECIAL: {
1295 u8 handle, type;
1296 const char *var;
1297 u16 devid;
1298 int ret;
1299
1300 handle = e->ext & 0xff;
1301 devid = (e->ext >> 8) & 0xffff;
1302 type = (e->ext >> 24) & 0xff;
1303
1304 if (type == IVHD_SPECIAL_IOAPIC)
1305 var = "IOAPIC";
1306 else if (type == IVHD_SPECIAL_HPET)
1307 var = "HPET";
1308 else
1309 var = "UNKNOWN";
1310
1311 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1312 var, (int)handle,
Shuah Khanc5081cd2013-02-27 17:07:19 -07001313 PCI_BUS_NUM(devid),
Joerg Roedel6efed632012-06-14 15:52:58 +02001314 PCI_SLOT(devid),
1315 PCI_FUNC(devid));
1316
Joerg Roedelc50e3242014-09-09 15:59:37 +02001317 ret = add_special_device(type, handle, &devid, false);
Joerg Roedel6efed632012-06-14 15:52:58 +02001318 if (ret)
1319 return ret;
Joerg Roedelc50e3242014-09-09 15:59:37 +02001320
1321 /*
1322 * add_special_device might update the devid in case a
1323 * command-line override is present. So call
1324 * set_dev_entry_from_acpi after add_special_device.
1325 */
1326 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1327
Joerg Roedel6efed632012-06-14 15:52:58 +02001328 break;
1329 }
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001330 case IVHD_DEV_ACPI_HID: {
1331 u16 devid;
Alexander Monakove461b8c2020-05-11 10:23:52 +00001332 u8 hid[ACPIHID_HID_LEN];
1333 u8 uid[ACPIHID_UID_LEN];
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001334 int ret;
1335
1336 if (h->type != 0x40) {
1337 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1338 e->type);
1339 break;
1340 }
1341
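			/* The HID string starts at the entry's ext field; copy at most ACPIHID_HID_LEN - 1 bytes and NUL-terminate */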
1342 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1343 hid[ACPIHID_HID_LEN - 1] = '\0';
1344
1345 if (!(*hid)) {
1346 pr_err(FW_BUG "Invalid HID.\n");
1347 break;
1348 }
1349
Alexander Monakove461b8c2020-05-11 10:23:52 +00001350 uid[0] = '\0';
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001351 switch (e->uidf) {
1352 case UID_NOT_PRESENT:
1353
1354 if (e->uidl != 0)
1355 pr_warn(FW_BUG "Invalid UID length.\n");
1356
1357 break;
1358 case UID_IS_INTEGER:
1359
1360 sprintf(uid, "%d", e->uid);
1361
1362 break;
1363 case UID_IS_CHARACTER:
1364
Alexander Monakove461b8c2020-05-11 10:23:52 +00001365 memcpy(uid, &e->uid, e->uidl);
1366 uid[e->uidl] = '\0';
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001367
1368 break;
1369 default:
1370 break;
1371 }
1372
Nicolas Iooss6082ee72016-06-26 10:33:29 +02001373 devid = e->devid;
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001374 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1375 hid, uid,
1376 PCI_BUS_NUM(devid),
1377 PCI_SLOT(devid),
1378 PCI_FUNC(devid));
1379
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001380 flags = e->flags;
1381
1382 ret = add_acpi_hid_device(hid, uid, &devid, false);
1383 if (ret)
1384 return ret;
1385
1386 /*
1387 * add_acpi_hid_device might update the devid in case a
1388 * command-line override is present. So call
1389 * set_dev_entry_from_acpi after add_acpi_hid_device.
1390 */
1391 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1392
1393 break;
1394 }
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001395 default:
1396 break;
1397 }
1398
Joerg Roedelb514e552008-09-17 17:14:27 +02001399 p += ivhd_entry_length(p);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001400 }
Joerg Roedel6efed632012-06-14 15:52:58 +02001401
1402 return 0;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001403}
1404
Joerg Roedele47d4022008-06-26 21:27:48 +02001405static void __init free_iommu_one(struct amd_iommu *iommu)
1406{
1407 free_command_buffer(iommu);
Joerg Roedel335503e2008-09-05 14:29:07 +02001408 free_event_buffer(iommu);
Joerg Roedel1a29ac02011-11-10 15:41:40 +01001409 free_ppr_log(iommu);
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001410 free_ga_log(iommu);
Joerg Roedele47d4022008-06-26 21:27:48 +02001411 iommu_unmap_mmio_space(iommu);
1412}
1413
1414static void __init free_iommu_all(void)
1415{
1416 struct amd_iommu *iommu, *next;
1417
Joerg Roedel3bd22172009-05-04 15:06:20 +02001418 for_each_iommu_safe(iommu, next) {
Joerg Roedele47d4022008-06-26 21:27:48 +02001419 list_del(&iommu->list);
1420 free_iommu_one(iommu);
1421 kfree(iommu);
1422 }
1423}
1424
Joerg Roedelb65233a2008-07-11 17:14:21 +02001425/*
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001426 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1427 * Workaround:
1428 * BIOS should disable L2B miscellaneous clock gating by setting
1429 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1430 */
Nikola Pajkovskye2f1a3b2013-02-26 16:12:05 +01001431static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001432{
1433 u32 value;
1434
1435 if ((boot_cpu_data.x86 != 0x15) ||
1436 (boot_cpu_data.x86_model < 0x10) ||
1437 (boot_cpu_data.x86_model > 0x1f))
1438 return;
1439
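	/* D0F2xF0/D0F2xF4 form an indirect address/data pair; select register 0x90 and read back its current value */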
1440 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1441 pci_read_config_dword(iommu->dev, 0xf4, &value);
1442
1443 if (value & BIT(2))
1444 return;
1445
1446 /* Select NB indirect register 0x90 and enable writing */
1447 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1448
1449 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001450 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001451
1452 /* Clear the enable writing bit */
1453 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1454}
1455
1456/*
Jay Cornwall358875f2016-02-10 15:48:01 -06001457 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1458 * Workaround:
1459 * BIOS should enable ATS write permission check by setting
1460 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1461 */
1462static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1463{
1464 u32 value;
1465
1466 if ((boot_cpu_data.x86 != 0x15) ||
1467 (boot_cpu_data.x86_model < 0x30) ||
1468 (boot_cpu_data.x86_model > 0x3f))
1469 return;
1470
1471 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1472 value = iommu_read_l2(iommu, 0x47);
1473
1474 if (value & BIT(0))
1475 return;
1476
1477 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1478 iommu_write_l2(iommu, 0x47, value | BIT(0));
1479
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001480 pci_info(iommu->dev, "Applying ATS write check workaround\n");
Jay Cornwall358875f2016-02-10 15:48:01 -06001481}
1482
1483/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02001484 * This function glues the initialization functions for one IOMMU
1485 * together and also allocates the command buffer and programs the
1486 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1487 */
Joerg Roedele47d4022008-06-26 21:27:48 +02001488static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1489{
Joerg Roedel6efed632012-06-14 15:52:58 +02001490 int ret;
1491
Scott Wood27790392018-01-21 03:28:54 -06001492 raw_spin_lock_init(&iommu->lock);
Joerg Roedelbb527772009-11-20 14:31:51 +01001493
1494 /* Add IOMMU to internal data structures */
Joerg Roedele47d4022008-06-26 21:27:48 +02001495 list_add_tail(&iommu->list, &amd_iommu_list);
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -06001496 iommu->index = amd_iommus_present++;
Joerg Roedelbb527772009-11-20 14:31:51 +01001497
1498 if (unlikely(iommu->index >= MAX_IOMMUS)) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001499 WARN(1, "System has more IOMMUs than supported by this driver\n");
Joerg Roedelbb527772009-11-20 14:31:51 +01001500 return -ENOSYS;
1501 }
1502
1503 /* Index is fine - add IOMMU to the array */
1504 amd_iommus[iommu->index] = iommu;
Joerg Roedele47d4022008-06-26 21:27:48 +02001505
1506 /*
1507 * Copy data from ACPI table entry to the iommu struct
1508 */
Joerg Roedel23c742d2012-06-12 11:47:34 +02001509 iommu->devid = h->devid;
Joerg Roedele47d4022008-06-26 21:27:48 +02001510 iommu->cap_ptr = h->cap_ptr;
Joerg Roedelee893c22008-09-08 14:48:04 +02001511 iommu->pci_seg = h->pci_seg;
Joerg Roedele47d4022008-06-26 21:27:48 +02001512 iommu->mmio_phys = h->mmio_phys;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001513
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001514 switch (h->type) {
1515 case 0x10:
1516 /* Check if IVHD EFR contains proper max banks/counters */
1517 if ((h->efr_attr != 0) &&
1518 ((h->efr_attr & (0xF << 13)) != 0) &&
1519 ((h->efr_attr & (0x3F << 17)) != 0))
1520 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1521 else
1522 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001523 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1524 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001525 break;
1526 case 0x11:
1527 case 0x40:
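		/* EFR image bit 9 (PCSup): performance counters are implemented, so map the larger MMIO range */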
1528 if (h->efr_reg & (1 << 9))
1529 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1530 else
1531 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001532 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1533 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
Suravee Suthikulpanit81307142019-11-20 07:55:48 -06001534 /*
1535 * Note: Since iommu_update_intcapxt() leverages
1536 * the IOMMU MMIO access to MSI capability block registers
1537 * for MSI address lo/hi/data, we need to check both
1538 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
1539 */
1540 if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
1541 (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
1542 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001543 break;
1544 default:
1545 return -EINVAL;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001546 }
1547
1548 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1549 iommu->mmio_phys_end);
Joerg Roedele47d4022008-06-26 21:27:48 +02001550 if (!iommu->mmio_base)
1551 return -ENOMEM;
1552
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001553 if (alloc_command_buffer(iommu))
Joerg Roedele47d4022008-06-26 21:27:48 +02001554 return -ENOMEM;
1555
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001556 if (alloc_event_buffer(iommu))
Joerg Roedel335503e2008-09-05 14:29:07 +02001557 return -ENOMEM;
1558
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001559 iommu->int_enabled = false;
1560
Baoquan He4c232a72017-08-09 16:33:33 +08001561 init_translation_status(iommu);
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08001562 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1563 iommu_disable(iommu);
1564 clear_translation_pre_enabled(iommu);
1565 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1566 iommu->index);
1567 }
1568 if (amd_iommu_pre_enabled)
1569 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
Baoquan He4c232a72017-08-09 16:33:33 +08001570
Joerg Roedel6efed632012-06-14 15:52:58 +02001571 ret = init_iommu_from_acpi(iommu, h);
1572 if (ret)
1573 return ret;
Joerg Roedelf6fec002012-06-21 16:51:25 +02001574
Jiang Liu7c71d302015-04-13 14:11:33 +08001575 ret = amd_iommu_create_irq_domain(iommu);
1576 if (ret)
1577 return ret;
1578
Joerg Roedelf6fec002012-06-21 16:51:25 +02001579 /*
1580 * Make sure IOMMU is not considered to translate itself. The IVRS
1581 * table tells us so, but this is a lie!
1582 */
1583 amd_iommu_rlookup_table[iommu->devid] = NULL;
1584
Joerg Roedel23c742d2012-06-12 11:47:34 +02001585 return 0;
Joerg Roedele47d4022008-06-26 21:27:48 +02001586}
1587
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001588/**
1589 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1590 * @ivrs: Pointer to the IVRS header
1591 *
1592 * This function searches through all IVHDs for the first IOMMU's devid and returns the highest supported type.
1593 */
1594static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1595{
1596 u8 *base = (u8 *)ivrs;
1597 struct ivhd_header *ivhd = (struct ivhd_header *)
1598 (base + IVRS_HEADER_LENGTH);
1599 u8 last_type = ivhd->type;
1600 u16 devid = ivhd->devid;
1601
1602 while (((u8 *)ivhd - base < ivrs->length) &&
1603 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1604 u8 *p = (u8 *) ivhd;
1605
1606 if (ivhd->devid == devid)
1607 last_type = ivhd->type;
1608 ivhd = (struct ivhd_header *)(p + ivhd->length);
1609 }
1610
1611 return last_type;
1612}
1613
Joerg Roedelb65233a2008-07-11 17:14:21 +02001614/*
1615 * Iterates over all IOMMU entries in the ACPI table, allocates the
1616 * IOMMU structure and initializes it with init_iommu_one()
1617 */
Joerg Roedele47d4022008-06-26 21:27:48 +02001618static int __init init_iommu_all(struct acpi_table_header *table)
1619{
1620 u8 *p = (u8 *)table, *end = (u8 *)table;
1621 struct ivhd_header *h;
1622 struct amd_iommu *iommu;
1623 int ret;
1624
Joerg Roedele47d4022008-06-26 21:27:48 +02001625 end += table->length;
1626 p += IVRS_HEADER_LENGTH;
1627
1628 while (p < end) {
1629 h = (struct ivhd_header *)p;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001630 if (*p == amd_iommu_target_ivhd_type) {
Joerg Roedel9c720412009-05-20 13:53:57 +02001631
Joerg Roedelae908c22009-09-01 16:52:16 +02001632 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
Joerg Roedel9c720412009-05-20 13:53:57 +02001633 "seg: %d flags: %01x info %04x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001634 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
Joerg Roedel9c720412009-05-20 13:53:57 +02001635 PCI_FUNC(h->devid), h->cap_ptr,
1636 h->pci_seg, h->flags, h->info);
1637 DUMP_printk(" mmio-addr: %016llx\n",
1638 h->mmio_phys);
1639
Joerg Roedele47d4022008-06-26 21:27:48 +02001640 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02001641 if (iommu == NULL)
1642 return -ENOMEM;
Joerg Roedel3551a702010-03-01 13:52:19 +01001643
Joerg Roedele47d4022008-06-26 21:27:48 +02001644 ret = init_iommu_one(iommu, h);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02001645 if (ret)
1646 return ret;
Joerg Roedele47d4022008-06-26 21:27:48 +02001647 }
1648 p += h->length;
1649
1650 }
1651 WARN_ON(p != end);
1652
1653 return 0;
1654}
1655
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001656static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1657 u8 fxn, u64 *value, bool is_write);
Steven L Kinney30861dd2013-06-05 16:11:48 -05001658
1659static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1660{
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001661 struct pci_dev *pdev = iommu->dev;
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001662 u64 val = 0xabcd, val2 = 0, save_reg = 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001663
1664 if (!iommu_feature(iommu, FEATURE_PC))
1665 return;
1666
1667 amd_iommu_pc_present = true;
1668
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001669 /* save the value to restore, if writable */
1670 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1671 goto pc_false;
1672
Steven L Kinney30861dd2013-06-05 16:11:48 -05001673 /* Check if the performance counters can be written to */
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001674 if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1675 (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001676 (val != val2))
1677 goto pc_false;
1678
1679 /* restore */
1680 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1681 goto pc_false;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001682
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001683 pci_info(pdev, "IOMMU performance counters supported\n");
Steven L Kinney30861dd2013-06-05 16:11:48 -05001684
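	/* The counter configuration register reports the number of banks (bits 17:12) and counters (bits 10:7) */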
1685 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1686 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1687 iommu->max_counters = (u8) ((val >> 7) & 0xf);
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001688
1689 return;
1690
1691pc_false:
1692 pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1693 amd_iommu_pc_present = false;
1694 return;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001695}
1696
Alex Williamson066f2e92014-06-12 16:12:37 -06001697static ssize_t amd_iommu_show_cap(struct device *dev,
1698 struct device_attribute *attr,
1699 char *buf)
1700{
Joerg Roedelb7a42b92017-02-28 13:57:18 +01001701 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
Alex Williamson066f2e92014-06-12 16:12:37 -06001702 return sprintf(buf, "%x\n", iommu->cap);
1703}
1704static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1705
1706static ssize_t amd_iommu_show_features(struct device *dev,
1707 struct device_attribute *attr,
1708 char *buf)
1709{
Joerg Roedelb7a42b92017-02-28 13:57:18 +01001710 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
Alex Williamson066f2e92014-06-12 16:12:37 -06001711 return sprintf(buf, "%llx\n", iommu->features);
1712}
1713static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1714
1715static struct attribute *amd_iommu_attrs[] = {
1716 &dev_attr_cap.attr,
1717 &dev_attr_features.attr,
1718 NULL,
1719};
1720
1721static struct attribute_group amd_iommu_group = {
1722 .name = "amd-iommu",
1723 .attrs = amd_iommu_attrs,
1724};
1725
1726static const struct attribute_group *amd_iommu_groups[] = {
1727 &amd_iommu_group,
1728 NULL,
1729};
Steven L Kinney30861dd2013-06-05 16:11:48 -05001730
Joerg Roedel24d2c522018-10-05 12:32:46 +02001731static int __init iommu_init_pci(struct amd_iommu *iommu)
Joerg Roedel23c742d2012-06-12 11:47:34 +02001732{
1733 int cap_ptr = iommu->cap_ptr;
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001734 int ret;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001735
Sinan Kayad5bf0f42017-12-19 00:37:47 -05001736 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1737 iommu->devid & 0xff);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001738 if (!iommu->dev)
1739 return -ENODEV;
1740
Jiang Liucbbc00b2015-10-09 22:07:31 +08001741 /* Prevent binding other PCI device drivers to IOMMU devices */
1742 iommu->dev->match_driver = false;
1743
Joerg Roedel23c742d2012-06-12 11:47:34 +02001744 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1745 &iommu->cap);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001746
Joerg Roedel23c742d2012-06-12 11:47:34 +02001747 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1748 amd_iommu_iotlb_sup = false;
1749
1750 /* read extended feature bits */
Adrian Huang62dcee72020-01-09 11:02:50 +08001751 iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001752
1753 if (iommu_feature(iommu, FEATURE_GT)) {
1754 int glxval;
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001755 u32 max_pasid;
1756 u64 pasmax;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001757
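		/* The EFR PASID field encodes the supported PASID width minus one */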
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001758 pasmax = iommu->features & FEATURE_PASID_MASK;
1759 pasmax >>= FEATURE_PASID_SHIFT;
1760 max_pasid = (1 << (pasmax + 1)) - 1;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001761
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001762 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1763
1764 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001765
1766 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1767 glxval >>= FEATURE_GLXVAL_SHIFT;
1768
1769 if (amd_iommu_max_glx_val == -1)
1770 amd_iommu_max_glx_val = glxval;
1771 else
1772 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1773 }
1774
1775 if (iommu_feature(iommu, FEATURE_GT) &&
1776 iommu_feature(iommu, FEATURE_PPR)) {
1777 iommu->is_iommu_v2 = true;
1778 amd_iommu_v2_present = true;
1779 }
1780
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001781 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1782 return -ENOMEM;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001783
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001784 ret = iommu_init_ga(iommu);
1785 if (ret)
1786 return ret;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001787
Joerg Roedel23c742d2012-06-12 11:47:34 +02001788 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1789 amd_iommu_np_cache = true;
1790
Steven L Kinney30861dd2013-06-05 16:11:48 -05001791 init_iommu_perf_ctr(iommu);
1792
Joerg Roedel23c742d2012-06-12 11:47:34 +02001793 if (is_rd890_iommu(iommu->dev)) {
1794 int i, j;
1795
Sinan Kayad5bf0f42017-12-19 00:37:47 -05001796 iommu->root_pdev =
1797 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1798 PCI_DEVFN(0, 0));
Joerg Roedel23c742d2012-06-12 11:47:34 +02001799
1800 /*
1801 * Some rd890 systems may not be fully reconfigured by the
1802 * BIOS, so it's necessary for us to store this information so
1803 * it can be reprogrammed on resume
1804 */
1805 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1806 &iommu->stored_addr_lo);
1807 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1808 &iommu->stored_addr_hi);
1809
1810 /* Low bit locks writes to configuration space */
1811 iommu->stored_addr_lo &= ~1;
1812
1813 for (i = 0; i < 6; i++)
1814 for (j = 0; j < 0x12; j++)
1815 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1816
1817 for (i = 0; i < 0x83; i++)
1818 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1819 }
1820
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001821 amd_iommu_erratum_746_workaround(iommu);
Jay Cornwall358875f2016-02-10 15:48:01 -06001822 amd_iommu_ats_write_check_workaround(iommu);
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001823
Joerg Roedel39ab9552017-02-01 16:56:46 +01001824 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1825 amd_iommu_groups, "ivhd%d", iommu->index);
Joerg Roedelb0119e82017-02-01 13:23:08 +01001826 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1827 iommu_device_register(&iommu->iommu);
Alex Williamson066f2e92014-06-12 16:12:37 -06001828
Joerg Roedel23c742d2012-06-12 11:47:34 +02001829 return pci_enable_device(iommu->dev);
1830}
1831
Joerg Roedel4d121c32012-06-14 12:21:55 +02001832static void print_iommu_info(void)
1833{
1834 static const char * const feat_str[] = {
1835 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1836 "IA", "GA", "HE", "PC"
1837 };
1838 struct amd_iommu *iommu;
1839
1840 for_each_iommu(iommu) {
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001841 struct pci_dev *pdev = iommu->dev;
Joerg Roedel4d121c32012-06-14 12:21:55 +02001842 int i;
1843
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001844 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
Joerg Roedel4d121c32012-06-14 12:21:55 +02001845
1846 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001847 pci_info(pdev, "Extended features (%#llx):\n",
1848 iommu->features);
Joerg Roedel2bd5ed02012-08-10 11:34:08 +02001849 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
Joerg Roedel4d121c32012-06-14 12:21:55 +02001850 if (iommu_feature(iommu, (1ULL << i)))
1851 pr_cont(" %s", feat_str[i]);
1852 }
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001853
1854 if (iommu->features & FEATURE_GAM_VAPIC)
1855 pr_cont(" GA_vAPIC");
1856
Steven L Kinney30861dd2013-06-05 16:11:48 -05001857 pr_cont("\n");
Borislav Petkov500c25e2012-09-28 16:22:26 +02001858 }
Joerg Roedel4d121c32012-06-14 12:21:55 +02001859 }
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001860 if (irq_remapping_enabled) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001861 pr_info("Interrupt remapping enabled\n");
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001862 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
Joerg Roedel101fa032018-11-27 16:22:31 +01001863 pr_info("Virtual APIC enabled\n");
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05001864 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
Joerg Roedel101fa032018-11-27 16:22:31 +01001865 pr_info("X2APIC enabled\n");
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001866 }
Joerg Roedel4d121c32012-06-14 12:21:55 +02001867}
1868
Joerg Roedel2c0ae172012-06-12 15:59:30 +02001869static int __init amd_iommu_init_pci(void)
Joerg Roedel23c742d2012-06-12 11:47:34 +02001870{
1871 struct amd_iommu *iommu;
1872 int ret = 0;
1873
1874 for_each_iommu(iommu) {
1875 ret = iommu_init_pci(iommu);
1876 if (ret)
1877 break;
1878 }
1879
Joerg Roedel522e5cb72016-07-01 16:42:55 +02001880 /*
1881 * Order is important here to make sure any unity map requirements are
1882 * fulfilled. The unity mappings are created and written to the device
1883 * table during the amd_iommu_init_api() call.
1884 *
1885 * After that we call init_device_table_dma() to make sure any
1886 * uninitialized DTE will block DMA, and in the end we flush the caches
1887 * of all IOMMUs to make sure the changes to the device table are
1888 * active.
1889 */
1890 ret = amd_iommu_init_api();
1891
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001892 init_device_table_dma();
Joerg Roedel23c742d2012-06-12 11:47:34 +02001893
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001894 for_each_iommu(iommu)
1895 iommu_flush_all_caches(iommu);
1896
Joerg Roedel3a18404c2015-05-28 18:41:45 +02001897 if (!ret)
1898 print_iommu_info();
Joerg Roedel4d121c32012-06-14 12:21:55 +02001899
Joerg Roedel23c742d2012-06-12 11:47:34 +02001900 return ret;
1901}
1902
Joerg Roedelb65233a2008-07-11 17:14:21 +02001903/****************************************************************************
1904 *
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001905 * The following functions initialize the MSI interrupts for all IOMMUs
Frank Arnolddf805ab2012-08-27 19:21:04 +02001906 * in the system. It's a bit challenging because there could be multiple
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001907 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1908 * pci_dev.
1909 *
1910 ****************************************************************************/
1911
Joerg Roedel9f800de2009-11-23 12:45:25 +01001912static int iommu_setup_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001913{
1914 int r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001915
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001916 r = pci_enable_msi(iommu->dev);
1917 if (r)
1918 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001919
Joerg Roedel72fe00f2011-05-10 10:50:42 +02001920 r = request_threaded_irq(iommu->dev->irq,
1921 amd_iommu_int_handler,
1922 amd_iommu_int_thread,
1923 0, "AMD-Vi",
Suravee Suthikulpanit3f398bc2013-04-22 16:32:34 -05001924 iommu);
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001925
1926 if (r) {
1927 pci_disable_msi(iommu->dev);
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001928 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001929 }
1930
Joerg Roedelfab6afa2009-05-04 18:46:34 +02001931 iommu->int_enabled = true;
Joerg Roedel1a29ac02011-11-10 15:41:40 +01001932
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001933 return 0;
1934}
1935
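/*
 * Assemble the 64-bit IntCapXT routing value: destination mode in bit 2,
 * destination[23:0] in bits 31:8, vector in bits 39:32, and
 * destination[31:24] in bits 63:56.
 */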
Suthikulpanit, Suravee66929812019-07-16 04:29:16 +00001936#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
1937#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
1938#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
1939#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
1940
1941/**
1942 * Set up the IntCapXT registers with interrupt routing information
1943 * based on the PCI MSI capability block registers, accessed via
1944 * MMIO MSI address low/hi and MSI data registers.
1945 */
1946static void iommu_update_intcapxt(struct amd_iommu *iommu)
1947{
1948 u64 val;
1949 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
1950 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
1951 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
1952 bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
1953 u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
1954
1955 if (x2apic_enabled())
1956 dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
1957
1958 val = XT_INT_VEC(data & 0xFF) |
1959 XT_INT_DEST_MODE(dm) |
1960 XT_INT_DEST_LO(dest) |
1961 XT_INT_DEST_HI(dest);
1962
1963 /**
1964 * The current IOMMU implementation uses the same IRQ for all
1965 * 3 IOMMU interrupts.
1966 */
1967 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
1968 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
1969 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
1970}
1971
1972static void _irq_notifier_notify(struct irq_affinity_notify *notify,
1973 const cpumask_t *mask)
1974{
1975 struct amd_iommu *iommu;
1976
1977 for_each_iommu(iommu) {
1978 if (iommu->dev->irq == notify->irq) {
1979 iommu_update_intcapxt(iommu);
1980 break;
1981 }
1982 }
1983}
1984
1985static void _irq_notifier_release(struct kref *ref)
1986{
1987}
1988
1989static int iommu_init_intcapxt(struct amd_iommu *iommu)
1990{
1991 int ret;
1992 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
1993
1994 /**
Suravee Suthikulpanit81307142019-11-20 07:55:48 -06001995 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
1996 * which can be inferred from amd_iommu_xt_mode.
Suthikulpanit, Suravee66929812019-07-16 04:29:16 +00001997 */
1998 if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
1999 return 0;
2000
2001 /**
2002 * Also, we need to set up a notifier to update the IntCapXT registers
2003 * whenever the irq affinity is changed from user-space.
2004 */
2005 notify->irq = iommu->dev->irq;
2006 notify->notify = _irq_notifier_notify;
2007 notify->release = _irq_notifier_release;
2008 ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
2009 if (ret) {
2010 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2011 iommu->devid, iommu->dev->irq);
2012 return ret;
2013 }
2014
2015 iommu_update_intcapxt(iommu);
2016 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2017 return ret;
2018}
2019
Joerg Roedel05f92db2009-05-12 09:52:46 +02002020static int iommu_init_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002021{
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002022 int ret;
2023
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002024 if (iommu->int_enabled)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002025 goto enable_faults;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002026
Yijing Wang82fcfc62013-08-08 21:12:36 +08002027 if (iommu->dev->msi_cap)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002028 ret = iommu_setup_msi(iommu);
2029 else
2030 ret = -ENODEV;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002031
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002032 if (ret)
2033 return ret;
2034
2035enable_faults:
Suthikulpanit, Suravee66929812019-07-16 04:29:16 +00002036 ret = iommu_init_intcapxt(iommu);
2037 if (ret)
2038 return ret;
2039
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002040 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2041
2042 if (iommu->ppr_log != NULL)
Adrian Huangbde9e6b2019-12-30 13:56:54 +08002043 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002044
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05002045 iommu_ga_log_enable(iommu);
2046
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002047 return 0;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002048}
2049
2050/****************************************************************************
2051 *
Joerg Roedelb65233a2008-07-11 17:14:21 +02002052 * The next functions belong to the third pass of parsing the ACPI
2053 * table. In this last pass the memory mapping requirements are
Frank Arnolddf805ab2012-08-27 19:21:04 +02002054 * gathered (like exclusion and unity mapping ranges).
Joerg Roedelb65233a2008-07-11 17:14:21 +02002055 *
2056 ****************************************************************************/
2057
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002058static void __init free_unity_maps(void)
2059{
2060 struct unity_map_entry *entry, *next;
2061
2062 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2063 list_del(&entry->list);
2064 kfree(entry);
2065 }
2066}
2067
Joerg Roedelb65233a2008-07-11 17:14:21 +02002068/* called when we find an exclusion range definition in ACPI */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002069static int __init init_exclusion_range(struct ivmd_header *m)
2070{
2071 int i;
2072
2073 switch (m->type) {
2074 case ACPI_IVMD_TYPE:
2075 set_device_exclusion_range(m->devid, m);
2076 break;
2077 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel3a61ec32008-07-25 13:07:50 +02002078 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002079 set_device_exclusion_range(i, m);
2080 break;
2081 case ACPI_IVMD_TYPE_RANGE:
2082 for (i = m->devid; i <= m->aux; ++i)
2083 set_device_exclusion_range(i, m);
2084 break;
2085 default:
2086 break;
2087 }
2088
2089 return 0;
2090}
2091
Joerg Roedelb65233a2008-07-11 17:14:21 +02002092/* called for unity map ACPI definition */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002093static int __init init_unity_map_range(struct ivmd_header *m)
2094{
Joerg Roedel98f1ad22012-07-06 13:28:37 +02002095 struct unity_map_entry *e = NULL;
Joerg Roedel02acc432009-05-20 16:24:21 +02002096 char *s;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002097
2098 e = kzalloc(sizeof(*e), GFP_KERNEL);
2099 if (e == NULL)
2100 return -ENOMEM;
2101
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002102 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2103 init_exclusion_range(m);
2104
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002105 switch (m->type) {
2106 default:
Joerg Roedel0bc252f2009-05-22 12:48:05 +02002107 kfree(e);
2108 return 0;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002109 case ACPI_IVMD_TYPE:
Joerg Roedel02acc432009-05-20 16:24:21 +02002110 s = "IVMD_TYPE\t\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002111 e->devid_start = e->devid_end = m->devid;
2112 break;
2113 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel02acc432009-05-20 16:24:21 +02002114 s = "IVMD_TYPE_ALL\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002115 e->devid_start = 0;
2116 e->devid_end = amd_iommu_last_bdf;
2117 break;
2118 case ACPI_IVMD_TYPE_RANGE:
Joerg Roedel02acc432009-05-20 16:24:21 +02002119 s = "IVMD_TYPE_RANGE\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002120 e->devid_start = m->devid;
2121 e->devid_end = m->aux;
2122 break;
2123 }
2124 e->address_start = PAGE_ALIGN(m->range_start);
2125 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
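	/* Bit 0 of the IVMD flags is the unity-map bit; the remaining bits carry the protection attributes */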
2126 e->prot = m->flags >> 1;
2127
Joerg Roedel02acc432009-05-20 16:24:21 +02002128 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2129 " range_start: %016llx range_end: %016llx flags: %x\n", s,
Shuah Khanc5081cd2013-02-27 17:07:19 -07002130 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2131 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
Joerg Roedel02acc432009-05-20 16:24:21 +02002132 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2133 e->address_start, e->address_end, m->flags);
2134
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002135 list_add_tail(&e->list, &amd_iommu_unity_map);
2136
2137 return 0;
2138}
2139
Joerg Roedelb65233a2008-07-11 17:14:21 +02002140/* iterates over all memory definitions we find in the ACPI table */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002141static int __init init_memory_definitions(struct acpi_table_header *table)
2142{
2143 u8 *p = (u8 *)table, *end = (u8 *)table;
2144 struct ivmd_header *m;
2145
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002146 end += table->length;
2147 p += IVRS_HEADER_LENGTH;
2148
2149 while (p < end) {
2150 m = (struct ivmd_header *)p;
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002151 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002152 init_unity_map_range(m);
2153
2154 p += m->length;
2155 }
2156
2157 return 0;
2158}
2159
Joerg Roedelb65233a2008-07-11 17:14:21 +02002160/*
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002161 * Init the device table to not allow DMA access for devices
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002162 */
Joerg Roedel33f28c52012-06-15 18:03:31 +02002163static void init_device_table_dma(void)
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002164{
Joerg Roedel0de66d52011-06-06 16:04:02 +02002165 u32 devid;
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002166
2167 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2168 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2169 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002170 }
2171}
2172
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002173static void __init uninit_device_table_dma(void)
2174{
2175 u32 devid;
2176
2177 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2178 amd_iommu_dev_table[devid].data[0] = 0ULL;
2179 amd_iommu_dev_table[devid].data[1] = 0ULL;
2180 }
2181}
2182
Joerg Roedel33f28c52012-06-15 18:03:31 +02002183static void init_device_table(void)
2184{
2185 u32 devid;
2186
2187 if (!amd_iommu_irq_remap)
2188 return;
2189
2190 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2191 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2192}
2193
Joerg Roedele9bf5192010-09-20 14:33:07 +02002194static void iommu_init_flags(struct amd_iommu *iommu)
2195{
2196 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2197 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2198 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2199
2200 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2201 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2202 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2203
2204 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2205 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2206 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2207
2208 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2209 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2210 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2211
2212 /*
2213 * make IOMMU memory accesses cache coherent
2214 */
2215 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
Joerg Roedel1456e9d2011-12-22 14:51:53 +01002216
2217 /* Set IOTLB invalidation timeout to 1s */
2218 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
Joerg Roedele9bf5192010-09-20 14:33:07 +02002219}
2220
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002221static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
Joerg Roedel4c894f42010-09-23 15:15:19 +02002222{
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002223 int i, j;
2224 u32 ioc_feature_control;
Joerg Roedelc1bf94e2012-05-31 17:38:11 +02002225 struct pci_dev *pdev = iommu->root_pdev;
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002226
2227 /* RD890 BIOSes may not have completely reconfigured the iommu */
Joerg Roedelc1bf94e2012-05-31 17:38:11 +02002228 if (!is_rd890_iommu(iommu->dev) || !pdev)
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002229 return;
2230
2231 /*
2232 * First, we need to ensure that the iommu is enabled. This is
2233 * controlled by a register in the northbridge.
2234 */
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002235
2236 /* Select Northbridge indirect register 0x75 and enable writing */
2237 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2238 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2239
2240 /* Enable the iommu */
2241 if (!(ioc_feature_control & 0x1))
2242 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2243
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002244 /* Restore the iommu BAR */
2245 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2246 iommu->stored_addr_lo);
2247 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2248 iommu->stored_addr_hi);
2249
2250 /* Restore the l1 indirect regs for each of the 6 l1s */
2251 for (i = 0; i < 6; i++)
2252 for (j = 0; j < 0x12; j++)
2253 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2254
2255 /* Restore the l2 indirect regs */
2256 for (i = 0; i < 0x83; i++)
2257 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2258
2259 /* Lock PCI setup registers */
2260 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2261 iommu->stored_addr_lo | 1);
Joerg Roedel4c894f42010-09-23 15:15:19 +02002262}
2263
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002264static void iommu_enable_ga(struct amd_iommu *iommu)
2265{
2266#ifdef CONFIG_IRQ_REMAP
2267 switch (amd_iommu_guest_ir) {
2268 case AMD_IOMMU_GUEST_IR_VAPIC:
2269 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2270 /* Fall through */
2271 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2272 iommu_feature_enable(iommu, CONTROL_GA_EN);
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05002273 iommu->irte_ops = &irte_128_ops;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002274 break;
2275 default:
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05002276 iommu->irte_ops = &irte_32_ops;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002277 break;
2278 }
2279#endif
2280}
2281
Baoquan He78d313c2017-08-09 16:33:34 +08002282static void early_enable_iommu(struct amd_iommu *iommu)
2283{
2284 iommu_disable(iommu);
2285 iommu_init_flags(iommu);
2286 iommu_set_device_table(iommu);
2287 iommu_enable_command_buffer(iommu);
2288 iommu_enable_event_buffer(iommu);
2289 iommu_set_exclusion_range(iommu);
2290 iommu_enable_ga(iommu);
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002291 iommu_enable_xt(iommu);
Baoquan He78d313c2017-08-09 16:33:34 +08002292 iommu_enable(iommu);
2293 iommu_flush_all_caches(iommu);
2294}
2295
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002296/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02002297 * This function finally enables all IOMMUs found in the system after
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002298 * they have been initialized.
2299 *
2300 * Or, if we are in a kdump kernel and the IOMMUs are all pre-enabled, try
2301 * to copy the old content of the device table entries. If that is not the
2302 * case, or the copy fails, just continue as a normal kernel would.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002303 */
Joerg Roedel11ee5ac2012-06-12 16:30:06 +02002304static void early_enable_iommus(void)
Joerg Roedel87361972008-06-26 21:28:07 +02002305{
2306 struct amd_iommu *iommu;
2307
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002308
2309 if (!copy_device_table()) {
2310 /*
2311 * If we get here because copying the device table from the old
2312 * kernel failed while all IOMMUs were enabled, print an error message
2313 * and try to free the allocated old_dev_tbl_cpy.
2314 */
2315 if (amd_iommu_pre_enabled)
2316 pr_err("Failed to copy DEV table from previous kernel.\n");
2317 if (old_dev_tbl_cpy != NULL)
2318 free_pages((unsigned long)old_dev_tbl_cpy,
2319 get_order(dev_table_size));
2320
2321 for_each_iommu(iommu) {
2322 clear_translation_pre_enabled(iommu);
2323 early_enable_iommu(iommu);
2324 }
2325 } else {
2326 pr_info("Copied DEV table from previous kernel.\n");
2327 free_pages((unsigned long)amd_iommu_dev_table,
2328 get_order(dev_table_size));
2329 amd_iommu_dev_table = old_dev_tbl_cpy;
2330 for_each_iommu(iommu) {
2331 iommu_disable_command_buffer(iommu);
2332 iommu_disable_event_buffer(iommu);
2333 iommu_enable_command_buffer(iommu);
2334 iommu_enable_event_buffer(iommu);
2335 iommu_enable_ga(iommu);
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002336 iommu_enable_xt(iommu);
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002337 iommu_set_device_table(iommu);
2338 iommu_flush_all_caches(iommu);
2339 }
Joerg Roedel87361972008-06-26 21:28:07 +02002340 }
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002341
2342#ifdef CONFIG_IRQ_REMAP
2343 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2344 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2345#endif
Joerg Roedel87361972008-06-26 21:28:07 +02002346}
2347
Joerg Roedel11ee5ac2012-06-12 16:30:06 +02002348static void enable_iommus_v2(void)
2349{
2350 struct amd_iommu *iommu;
2351
2352 for_each_iommu(iommu) {
2353 iommu_enable_ppr_log(iommu);
2354 iommu_enable_gt(iommu);
2355 }
2356}
2357
2358static void enable_iommus(void)
2359{
2360 early_enable_iommus();
2361
2362 enable_iommus_v2();
2363}
2364
Joerg Roedel92ac4322009-05-19 19:06:27 +02002365static void disable_iommus(void)
2366{
2367 struct amd_iommu *iommu;
2368
2369 for_each_iommu(iommu)
2370 iommu_disable(iommu);
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002371
2372#ifdef CONFIG_IRQ_REMAP
2373 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2374 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2375#endif
Joerg Roedel92ac4322009-05-19 19:06:27 +02002376}
2377
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002378/*
2379 * Suspend/Resume support
2380 * disable suspend until real resume implemented
2381 */
2382
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002383static void amd_iommu_resume(void)
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002384{
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002385 struct amd_iommu *iommu;
2386
2387 for_each_iommu(iommu)
2388 iommu_apply_resume_quirks(iommu);
2389
Joerg Roedel736501e2009-05-12 09:56:12 +02002390 /* re-load the hardware */
2391 enable_iommus();
Joerg Roedel3d9761e2012-03-15 16:39:21 +01002392
2393 amd_iommu_enable_interrupts();
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002394}
2395
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002396static int amd_iommu_suspend(void)
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002397{
Joerg Roedel736501e2009-05-12 09:56:12 +02002398 /* disable IOMMUs to go out of the way for BIOS */
2399 disable_iommus();
2400
2401 return 0;
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002402}
2403
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002404static struct syscore_ops amd_iommu_syscore_ops = {
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002405 .suspend = amd_iommu_suspend,
2406 .resume = amd_iommu_resume,
2407};
2408
Joerg Roedel90b3eb02017-06-16 16:09:55 +02002409static void __init free_iommu_resources(void)
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002410{
Lucas Stachebcfa282016-10-26 13:09:53 +02002411 kmemleak_free(irq_lookup_table);
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002412 free_pages((unsigned long)irq_lookup_table,
2413 get_order(rlookup_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002414 irq_lookup_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002415
Julia Lawalla5919892015-09-13 14:15:31 +02002416 kmem_cache_destroy(amd_iommu_irq_cache);
2417 amd_iommu_irq_cache = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002418
2419 free_pages((unsigned long)amd_iommu_rlookup_table,
2420 get_order(rlookup_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002421 amd_iommu_rlookup_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002422
2423 free_pages((unsigned long)amd_iommu_alias_table,
2424 get_order(alias_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002425 amd_iommu_alias_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002426
2427 free_pages((unsigned long)amd_iommu_dev_table,
2428 get_order(dev_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002429 amd_iommu_dev_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002430
2431 free_iommu_all();
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002432}
2433
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002434/* SB IOAPIC is always on this device in AMD systems */
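/* i.e. bus 0x00, device 0x14, function 0 */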
2435#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2436
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002437static bool __init check_ioapic_information(void)
2438{
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002439 const char *fw_bug = FW_BUG;
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002440 bool ret, has_sb_ioapic;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002441 int idx;
2442
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002443 has_sb_ioapic = false;
2444 ret = false;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002445
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002446 /*
2447 * If we have map overrides on the kernel command line the
2448 * messages in this function might not describe firmware bugs
2449 * anymore - so be careful
2450 */
2451 if (cmdline_maps)
2452 fw_bug = "";
2453
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002454 for (idx = 0; idx < nr_ioapics; idx++) {
2455 int devid, id = mpc_ioapic_id(idx);
2456
2457 devid = get_ioapic_devid(id);
2458 if (devid < 0) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002459 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002460 fw_bug, id);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002461 ret = false;
2462 } else if (devid == IOAPIC_SB_DEVID) {
2463 has_sb_ioapic = true;
2464 ret = true;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002465 }
2466 }
2467
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002468 if (!has_sb_ioapic) {
2469 /*
2470 * We expect the SB IOAPIC to be listed in the IVRS
2471 * table. The system timer is connected to the SB IOAPIC
2472 * and if we don't have it in the list the system will
2473 * panic at boot time. This situation usually happens
2474 * when the BIOS is buggy and provides us the wrong
2475 * device id for the IOAPIC in the system.
2476 */
Joerg Roedel101fa032018-11-27 16:22:31 +01002477 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002478 }
2479
2480 if (!ret)
Joerg Roedel101fa032018-11-27 16:22:31 +01002481 pr_err("Disabling interrupt remapping\n");
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002482
2483 return ret;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002484}
2485
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002486static void __init free_dma_resources(void)
2487{
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002488 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2489 get_order(MAX_DOMAIN_ID/8));
Joerg Roedelf6019272017-06-16 16:09:58 +02002490 amd_iommu_pd_alloc_bitmap = NULL;
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002491
2492 free_unity_maps();
2493}
2494
Joerg Roedelb65233a2008-07-11 17:14:21 +02002495/*
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002496 * This is the hardware init function for AMD IOMMU in the system.
2497 * This function is called either from amd_iommu_init or from the interrupt
2498 * remapping setup code.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002499 *
2500 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002501 * four times:
Joerg Roedelb65233a2008-07-11 17:14:21 +02002502 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002503 * 1 pass) Discover the most comprehensive IVHD type to use.
2504 *
2505 * 2 pass) Find the highest PCI device id the driver has to handle.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002506 * Upon this information the size of the data structures is
2507 * determined that needs to be allocated.
2508 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002509 * 3 pass) Initialize the data structures just allocated with the
Joerg Roedelb65233a2008-07-11 17:14:21 +02002510 * information in the ACPI table about available AMD IOMMUs
2511 * in the system. It also maps the PCI devices in the
2512 * system to specific IOMMUs
2513 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002514 * 4 pass) After the basic data structures are allocated and
Joerg Roedelb65233a2008-07-11 17:14:21 +02002515 * initialized we update them with information about memory
2516 * remapping requirements parsed out of the ACPI table in
2517 * this last pass.
2518 *
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002519 * After everything is set up the IOMMUs are enabled and the necessary
2520 * hotplug and suspend notifiers are registered.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002521 */
Joerg Roedel643511b2012-06-12 12:09:35 +02002522static int __init early_amd_iommu_init(void)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002523{
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002524 struct acpi_table_header *ivrs_base;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002525 acpi_status status;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002526 int i, remap_cache_sz, ret = 0;
Kai-Heng Feng3dfee472020-02-10 15:51:15 +08002527 u32 pci_id;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002528
Joerg Roedel643511b2012-06-12 12:09:35 +02002529 if (!amd_iommu_detected)
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002530 return -ENODEV;
2531
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002532 status = acpi_get_table("IVRS", 0, &ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002533 if (status == AE_NOT_FOUND)
2534 return -ENODEV;
2535 else if (ACPI_FAILURE(status)) {
2536 const char *err = acpi_format_exception(status);
Joerg Roedel101fa032018-11-27 16:22:31 +01002537 pr_err("IVRS table error: %s\n", err);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002538 return -EINVAL;
2539 }
2540
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002541 /*
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002542 * Validate checksum here so we don't need to do it when
2543 * we actually parse the table
2544 */
2545 ret = check_ivrs_checksum(ivrs_base);
2546 if (ret)
Rafael J. Wysocki99e8ccd2017-01-10 14:57:28 +01002547 goto out;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002548
2549 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2550 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2551
2552 /*
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002553 * First parse ACPI tables to find the largest Bus/Dev/Func
2554 * we need to handle. Upon this information the shared data
2555 * structures for the IOMMUs in the system will be allocated
2556 */
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002557 ret = find_last_devid_acpi(ivrs_base);
2558 if (ret)
Joerg Roedel3551a702010-03-01 13:52:19 +01002559 goto out;
2560
Joerg Roedelc5714842008-07-11 17:14:25 +02002561 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2562 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2563 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002564
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002565 /* Device table - directly used by all IOMMUs */
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002566 ret = -ENOMEM;
Baoquan Heb3367812017-08-09 16:33:42 +08002567 amd_iommu_dev_table = (void *)__get_free_pages(
2568 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002569 get_order(dev_table_size));
2570 if (amd_iommu_dev_table == NULL)
2571 goto out;
2572
2573 /*
2574 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2575 * IOMMU sees for that device
2576 */
2577 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2578 get_order(alias_table_size));
2579 if (amd_iommu_alias_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002580 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002581
2582 /* IOMMU rlookup table - find the IOMMU for a specific device */
Joerg Roedel83fd5cc2008-12-16 19:17:11 +01002583 amd_iommu_rlookup_table = (void *)__get_free_pages(
2584 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002585 get_order(rlookup_table_size));
2586 if (amd_iommu_rlookup_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002587 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002588
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002589 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2590 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002591 get_order(MAX_DOMAIN_ID/8));
2592 if (amd_iommu_pd_alloc_bitmap == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002593 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002594
2595 /*
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002596 * let all alias entries point to themselves
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002597 */
Joerg Roedel3a61ec32008-07-25 13:07:50 +02002598 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002599 amd_iommu_alias_table[i] = i;
2600
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002601 /*
2602 * never allocate domain 0 because it's used as the non-allocated and
2603 * error value placeholder
2604 */
Baoquan He5c87f622016-09-15 16:50:51 +08002605 __set_bit(0, amd_iommu_pd_alloc_bitmap);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002606
2607 /*
2608 * now the data structures are allocated and basically initialized
2609 * start the real acpi table scan
2610 */
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002611 ret = init_iommu_all(ivrs_base);
2612 if (ret)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002613 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002614
Kai-Heng Feng3dfee472020-02-10 15:51:15 +08002615 /* Disable IOMMU if there's Stoney Ridge graphics */
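	/* Scan bus 0, slots 0x00-0x1f, function 0: vendor 0x1002 (ATI/AMD) with device 0x98e4 is the Stoney Ridge GPU */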
2616 for (i = 0; i < 32; i++) {
2617 pci_id = read_pci_config(0, i, 0, 0);
2618 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2619 pr_info("Disable IOMMU on Stoney Ridge\n");
2620 amd_iommu_disabled = true;
2621 break;
2622 }
2623 }
2624
Joerg Roedel11123742017-06-16 16:09:54 +02002625 /* Disable any previously enabled IOMMUs */
Baoquan He20b46df2017-08-09 16:33:44 +08002626 if (!is_kdump_kernel() || amd_iommu_disabled)
2627 disable_iommus();
Joerg Roedel11123742017-06-16 16:09:54 +02002628
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002629 if (amd_iommu_irq_remap)
2630 amd_iommu_irq_remap = check_ioapic_information();
2631
Joerg Roedel05152a02012-06-15 16:53:51 +02002632 if (amd_iommu_irq_remap) {
2633 /*
2634 * Interrupt remapping enabled, create kmem_cache for the
2635 * remapping tables.
2636 */
Wei Yongjun83ed9c12013-04-23 10:47:44 +08002637 ret = -ENOMEM;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002638 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2639 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2640 else
2641 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
Joerg Roedel05152a02012-06-15 16:53:51 +02002642 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002643 remap_cache_sz,
2644 IRQ_TABLE_ALIGNMENT,
2645 0, NULL);
Joerg Roedel05152a02012-06-15 16:53:51 +02002646 if (!amd_iommu_irq_cache)
2647 goto out;
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002648
2649 irq_lookup_table = (void *)__get_free_pages(
2650 GFP_KERNEL | __GFP_ZERO,
2651 get_order(rlookup_table_size));
Lucas Stachebcfa282016-10-26 13:09:53 +02002652 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2653 1, GFP_KERNEL);
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002654 if (!irq_lookup_table)
2655 goto out;
Joerg Roedel05152a02012-06-15 16:53:51 +02002656 }
2657
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002658 ret = init_memory_definitions(ivrs_base);
2659 if (ret)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002660 goto out;
Joerg Roedel3551a702010-03-01 13:52:19 +01002661
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002662 /* init the device table */
2663 init_device_table();
2664
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002665out:
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002666 /* Don't leak any ACPI memory */
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002667 acpi_put_table(ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002668 ivrs_base = NULL;
2669
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002670 return ret;
Joerg Roedel643511b2012-06-12 12:09:35 +02002671}
2672
Gerard Snitselaarae295142012-03-16 11:38:22 -07002673static int amd_iommu_enable_interrupts(void)
Joerg Roedel3d9761e2012-03-15 16:39:21 +01002674{
2675 struct amd_iommu *iommu;
2676 int ret = 0;
2677
2678 for_each_iommu(iommu) {
2679 ret = iommu_init_msi(iommu);
2680 if (ret)
2681 goto out;
2682 }
2683
2684out:
2685 return ret;
2686}
2687
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002688static bool detect_ivrs(void)
2689{
2690 struct acpi_table_header *ivrs_base;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002691 acpi_status status;
2692
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002693 status = acpi_get_table("IVRS", 0, &ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002694 if (status == AE_NOT_FOUND)
2695 return false;
2696 else if (ACPI_FAILURE(status)) {
2697 const char *err = acpi_format_exception(status);
Joerg Roedel101fa032018-11-27 16:22:31 +01002698 pr_err("IVRS table error: %s\n", err);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002699 return false;
2700 }
2701
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002702 acpi_put_table(ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002703
Joerg Roedel1adb7d32012-08-06 14:18:42 +02002704 /* Make sure ACS will be enabled during PCI probe */
2705 pci_request_acs();
2706
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002707 return true;
2708}
2709
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002710/****************************************************************************
2711 *
2712 * AMD IOMMU Initialization State Machine
2713 *
2714 ****************************************************************************/
2715
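/*
 * The normal forward progression is IOMMU_START_STATE -> IOMMU_IVRS_DETECTED
 * -> IOMMU_ACPI_FINISHED -> IOMMU_ENABLED -> IOMMU_PCI_INIT ->
 * IOMMU_INTERRUPTS_EN -> IOMMU_DMA_OPS -> IOMMU_INITIALIZED.
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are terminal
 * error states.
 */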
2716static int __init state_next(void)
2717{
2718 int ret = 0;
2719
2720 switch (init_state) {
2721 case IOMMU_START_STATE:
2722 if (!detect_ivrs()) {
2723 init_state = IOMMU_NOT_FOUND;
2724 ret = -ENODEV;
2725 } else {
2726 init_state = IOMMU_IVRS_DETECTED;
2727 }
2728 break;
2729 case IOMMU_IVRS_DETECTED:
2730 ret = early_amd_iommu_init();
2731 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
Joerg Roedel7ad820e2017-06-16 16:09:59 +02002732 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
Kai-Heng Feng3dfee472020-02-10 15:51:15 +08002733 pr_info("AMD IOMMU disabled\n");
Joerg Roedel7ad820e2017-06-16 16:09:59 +02002734 init_state = IOMMU_CMDLINE_DISABLED;
2735 ret = -EINVAL;
2736 }
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002737 break;
2738 case IOMMU_ACPI_FINISHED:
2739 early_enable_iommus();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002740 x86_platform.iommu_shutdown = disable_iommus;
2741 init_state = IOMMU_ENABLED;
2742 break;
2743 case IOMMU_ENABLED:
Joerg Roedel74ddda72017-07-26 14:17:55 +02002744 register_syscore_ops(&amd_iommu_syscore_ops);
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002745 ret = amd_iommu_init_pci();
2746 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2747 enable_iommus_v2();
2748 break;
2749 case IOMMU_PCI_INIT:
2750 ret = amd_iommu_enable_interrupts();
2751 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2752 break;
2753 case IOMMU_INTERRUPTS_EN:
Joerg Roedel1e6a7b02015-07-28 16:58:48 +02002754 ret = amd_iommu_init_dma_ops();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002755 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2756 break;
2757 case IOMMU_DMA_OPS:
2758 init_state = IOMMU_INITIALIZED;
2759 break;
2760 case IOMMU_INITIALIZED:
2761 /* Nothing to do */
2762 break;
2763 case IOMMU_NOT_FOUND:
2764 case IOMMU_INIT_ERROR:
Joerg Roedel1b1e9422017-06-16 16:09:56 +02002765 case IOMMU_CMDLINE_DISABLED:
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002766 /* Error states => do nothing */
2767 ret = -EINVAL;
2768 break;
2769 default:
2770 /* Unknown state */
2771 BUG();
2772 }
2773
Kevin Mitchell5c905012019-06-12 14:52:05 -07002774 if (ret) {
2775 free_dma_resources();
2776 if (!irq_remapping_enabled) {
2777 disable_iommus();
2778 free_iommu_resources();
2779 } else {
2780 struct amd_iommu *iommu;
2781
2782 uninit_device_table_dma();
2783 for_each_iommu(iommu)
2784 iommu_flush_all_caches(iommu);
2785 }
2786 }
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002787 return ret;
2788}
2789
2790static int __init iommu_go_to_state(enum iommu_init_state state)
2791{
Joerg Roedel151b0902017-06-16 16:09:57 +02002792 int ret = -EINVAL;
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002793
2794 while (init_state != state) {
Joerg Roedel1b1e9422017-06-16 16:09:56 +02002795 if (init_state == IOMMU_NOT_FOUND ||
2796 init_state == IOMMU_INIT_ERROR ||
2797 init_state == IOMMU_CMDLINE_DISABLED)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002798 break;
Joerg Roedel151b0902017-06-16 16:09:57 +02002799 ret = state_next();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002800 }
2801
2802 return ret;
2803}
2804
Joerg Roedel6b474b82012-06-26 16:46:04 +02002805#ifdef CONFIG_IRQ_REMAP
2806int __init amd_iommu_prepare(void)
2807{
Thomas Gleixner3f4cb7c2015-01-23 14:32:46 +01002808 int ret;
2809
Jiang Liu7fa1c842015-01-07 15:31:42 +08002810 amd_iommu_irq_remap = true;
Joerg Roedel84d07792015-01-07 15:31:39 +08002811
Thomas Gleixner3f4cb7c2015-01-23 14:32:46 +01002812 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2813 if (ret)
2814 return ret;
2815 return amd_iommu_irq_remap ? 0 : -ENODEV;
Joerg Roedel6b474b82012-06-26 16:46:04 +02002816}
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002817
Joerg Roedel6b474b82012-06-26 16:46:04 +02002818int __init amd_iommu_enable(void)
2819{
2820 int ret;
2821
2822 ret = iommu_go_to_state(IOMMU_ENABLED);
2823 if (ret)
2824 return ret;
2825
2826 irq_remapping_enabled = 1;
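	/*
	 * The returned mode tells the generic irq-remapping code whether the
	 * IOMMU was set up for x2APIC (XT) operation.
	 */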
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002827 return amd_iommu_xt_mode;
Joerg Roedel6b474b82012-06-26 16:46:04 +02002828}
2829
2830void amd_iommu_disable(void)
2831{
2832 amd_iommu_suspend();
2833}
2834
2835int amd_iommu_reenable(int mode)
2836{
2837 amd_iommu_resume();
2838
2839 return 0;
2840}
2841
2842int __init amd_iommu_enable_faulting(void)
2843{
2844 /* We enable MSI later when PCI is initialized */
2845 return 0;
2846}
2847#endif
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002848
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002849/*
2850 * This is the core init function for AMD IOMMU hardware in the system.
2851 * This function is called from the generic x86 DMA layer initialization
2852 * code.
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002853 */
2854static int __init amd_iommu_init(void)
2855{
Gary R Hook7d0f5fd2018-06-12 16:41:30 -05002856 struct amd_iommu *iommu;
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002857 int ret;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002858
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002859 ret = iommu_go_to_state(IOMMU_INITIALIZED);
Kevin Mitchellbf4bff42019-06-12 14:52:04 -07002860#ifdef CONFIG_GART_IOMMU
2861 if (ret && list_empty(&amd_iommu_list)) {
2862 /*
2863 * We failed to initialize the AMD IOMMU - try fallback
2864 * to GART if possible.
2865 */
2866 gart_iommu_init();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002867 }
Kevin Mitchellbf4bff42019-06-12 14:52:04 -07002868#endif
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002869
Gary R Hook7d0f5fd2018-06-12 16:41:30 -05002870 for_each_iommu(iommu)
2871 amd_iommu_debugfs_setup(iommu);
2872
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002873 return ret;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002874}
2875
Tom Lendacky2543a782017-07-17 16:10:24 -05002876static bool amd_iommu_sme_check(void)
2877{
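	/* Only family 0x17 parts with SME active need the microcode checks */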
2878 if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2879 return true;
2880
2881 /* For Fam17h, a specific level of support is required */
2882 if (boot_cpu_data.microcode >= 0x08001205)
2883 return true;
2884
2885 if ((boot_cpu_data.microcode >= 0x08001126) &&
2886 (boot_cpu_data.microcode <= 0x080011ff))
2887 return true;
2888
Joerg Roedel101fa032018-11-27 16:22:31 +01002889 pr_notice("IOMMU not currently supported when SME is active\n");
Tom Lendacky2543a782017-07-17 16:10:24 -05002890
2891 return false;
2892}
2893
Joerg Roedelb65233a2008-07-11 17:14:21 +02002894/****************************************************************************
2895 *
2896 * Early detect code. This code runs at IOMMU detection time in the DMA
2897	 * layer. It just checks whether there is an IVRS ACPI table to detect
2898	 * AMD IOMMUs.
2899 *
2900 ****************************************************************************/
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -04002901int __init amd_iommu_detect(void)
Joerg Roedelae7877d2008-06-26 21:27:51 +02002902{
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002903 int ret;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002904
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09002905 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -04002906 return -ENODEV;
Joerg Roedelae7877d2008-06-26 21:27:51 +02002907
Tom Lendacky2543a782017-07-17 16:10:24 -05002908 if (!amd_iommu_sme_check())
2909 return -ENODEV;
2910
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002911 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2912 if (ret)
2913 return ret;
Linus Torvalds11bd04f2009-12-11 12:18:16 -08002914
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002915 amd_iommu_detected = true;
2916 iommu_detected = 1;
2917 x86_init.iommu.iommu_init = amd_iommu_init;
2918
Jérôme Glisse4781bc42015-08-31 18:13:03 -04002919 return 1;
Joerg Roedelae7877d2008-06-26 21:27:51 +02002920}
2921
Joerg Roedelb65233a2008-07-11 17:14:21 +02002922/****************************************************************************
2923 *
2924 * Parsing functions for the AMD IOMMU specific kernel command line
2925 * options.
2926 *
2927 ****************************************************************************/
2928
Joerg Roedelfefda112009-05-20 12:21:42 +02002929static int __init parse_amd_iommu_dump(char *str)
2930{
2931 amd_iommu_dump = true;
2932
2933 return 1;
2934}
2935
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002936static int __init parse_amd_iommu_intr(char *str)
2937{
2938 for (; *str; ++str) {
2939 if (strncmp(str, "legacy", 6) == 0) {
Suravee Suthikulpanitb74aa022020-04-22 08:30:02 -05002940 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002941 break;
2942 }
2943 if (strncmp(str, "vapic", 5) == 0) {
2944 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2945 break;
2946 }
2947 }
2948 return 1;
2949}
2950
Joerg Roedel918ad6c2008-06-26 21:27:52 +02002951static int __init parse_amd_iommu_options(char *str)
2952{
2953 for (; *str; ++str) {
Joerg Roedel695b5672008-11-17 15:16:43 +01002954 if (strncmp(str, "fullflush", 9) == 0)
FUJITA Tomonoriafa9fdc2008-09-20 01:23:30 +09002955 amd_iommu_unmap_flush = true;
Joerg Roedela5235722010-05-11 17:12:33 +02002956 if (strncmp(str, "off", 3) == 0)
2957 amd_iommu_disabled = true;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002958 if (strncmp(str, "force_isolation", 15) == 0)
2959 amd_iommu_force_isolation = true;
Joerg Roedel918ad6c2008-06-26 21:27:52 +02002960 }
2961
2962 return 1;
2963}
2964
Joerg Roedel440e89982013-04-09 16:35:28 +02002965static int __init parse_ivrs_ioapic(char *str)
2966{
2967 unsigned int bus, dev, fn;
2968 int ret, id, i;
2969 u16 devid;
2970
2971 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2972
2973 if (ret != 4) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002974 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
Joerg Roedel440e89982013-04-09 16:35:28 +02002975 return 1;
2976 }
2977
2978 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002979 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
Joerg Roedel440e89982013-04-09 16:35:28 +02002980 str);
2981 return 1;
2982 }
2983
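	/* Pack the BDF into a 16-bit devid: bus[15:8], device[7:3], function[2:0] */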
2984 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2985
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002986 cmdline_maps = true;
Joerg Roedel440e89982013-04-09 16:35:28 +02002987 i = early_ioapic_map_size++;
2988 early_ioapic_map[i].id = id;
2989 early_ioapic_map[i].devid = devid;
2990 early_ioapic_map[i].cmd_line = true;
2991
2992 return 1;
2993}
2994
2995static int __init parse_ivrs_hpet(char *str)
2996{
2997 unsigned int bus, dev, fn;
2998 int ret, id, i;
2999 u16 devid;
3000
3001 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3002
3003 if (ret != 4) {
Joerg Roedel101fa032018-11-27 16:22:31 +01003004 pr_err("Invalid command line: ivrs_hpet%s\n", str);
Joerg Roedel440e89982013-04-09 16:35:28 +02003005 return 1;
3006 }
3007
3008 if (early_hpet_map_size == EARLY_MAP_SIZE) {
Joerg Roedel101fa032018-11-27 16:22:31 +01003009 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
Joerg Roedel440e89982013-04-09 16:35:28 +02003010 str);
3011 return 1;
3012 }
3013
3014 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3015
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02003016 cmdline_maps = true;
Joerg Roedel440e89982013-04-09 16:35:28 +02003017 i = early_hpet_map_size++;
3018 early_hpet_map[i].id = id;
3019 early_hpet_map[i].devid = devid;
3020 early_hpet_map[i].cmd_line = true;
3021
3022 return 1;
3023}
3024
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04003025static int __init parse_ivrs_acpihid(char *str)
3026{
3027 u32 bus, dev, fn;
3028 char *hid, *uid, *p;
3029 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3030 int ret, i;
3031
3032 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3033 if (ret != 4) {
Joerg Roedel101fa032018-11-27 16:22:31 +01003034 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04003035 return 1;
3036 }
3037
3038 p = acpiid;
3039 hid = strsep(&p, ":");
3040 uid = p;
3041
3042 if (!hid || !(*hid) || !uid) {
Joerg Roedel101fa032018-11-27 16:22:31 +01003043 pr_err("Invalid command line: hid or uid\n");
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04003044 return 1;
3045 }
3046
3047 i = early_acpihid_map_size++;
3048 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3049 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3050 early_acpihid_map[i].devid =
3051 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3052 early_acpihid_map[i].cmd_line = true;
3053
3054 return 1;
3055}
3056
Joerg Roedel440e89982013-04-09 16:35:28 +02003057__setup("amd_iommu_dump", parse_amd_iommu_dump);
3058__setup("amd_iommu=", parse_amd_iommu_options);
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05003059__setup("amd_iommu_intr=", parse_amd_iommu_intr);
Joerg Roedel440e89982013-04-09 16:35:28 +02003060__setup("ivrs_ioapic", parse_ivrs_ioapic);
3061__setup("ivrs_hpet", parse_ivrs_hpet);
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04003062__setup("ivrs_acpihid", parse_ivrs_acpihid);
Konrad Rzeszutek Wilk22e6daf2010-08-26 13:58:03 -04003063
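/*
 * Hook amd_iommu_detect() into the x86 IOMMU initialization table;
 * gart_iommu_hole_init() is listed as the detection routine it depends on.
 */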
3064IOMMU_INIT_FINISH(amd_iommu_detect,
3065 gart_iommu_hole_init,
Joerg Roedel98f1ad22012-07-06 13:28:37 +02003066 NULL,
3067 NULL);
Joerg Roedel400a28a2011-11-28 15:11:02 +01003068
3069bool amd_iommu_v2_supported(void)
3070{
3071 return amd_iommu_v2_present;
3072}
3073EXPORT_SYMBOL(amd_iommu_v2_supported);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003074
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003075struct amd_iommu *get_amd_iommu(unsigned int idx)
3076{
3077 unsigned int i = 0;
3078 struct amd_iommu *iommu;
3079
3080 for_each_iommu(iommu)
3081 if (i++ == idx)
3082 return iommu;
3083 return NULL;
3084}
3085EXPORT_SYMBOL(get_amd_iommu);
3086
Steven L Kinney30861dd2013-06-05 16:11:48 -05003087/****************************************************************************
3088 *
3089	 * IOMMU EFR Performance Counter support. This code allows access to the
3090	 * IOMMU PC functionality.
3091 *
3092 ****************************************************************************/
3093
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003094u8 amd_iommu_pc_get_max_banks(unsigned int idx)
Steven L Kinney30861dd2013-06-05 16:11:48 -05003095{
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003096 struct amd_iommu *iommu = get_amd_iommu(idx);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003097
Steven L Kinney30861dd2013-06-05 16:11:48 -05003098 if (iommu)
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003099 return iommu->max_banks;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003100
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003101 return 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003102}
3103EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3104
3105bool amd_iommu_pc_supported(void)
3106{
3107 return amd_iommu_pc_present;
3108}
3109EXPORT_SYMBOL(amd_iommu_pc_supported);
3110
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003111u8 amd_iommu_pc_get_max_counters(unsigned int idx)
Steven L Kinney30861dd2013-06-05 16:11:48 -05003112{
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003113 struct amd_iommu *iommu = get_amd_iommu(idx);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003114
Steven L Kinney30861dd2013-06-05 16:11:48 -05003115 if (iommu)
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003116 return iommu->max_counters;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003117
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003118 return 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003119}
3120EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3121
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003122static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3123 u8 fxn, u64 *value, bool is_write)
Steven L Kinney30861dd2013-06-05 16:11:48 -05003124{
Steven L Kinney30861dd2013-06-05 16:11:48 -05003125 u32 offset;
3126 u32 max_offset_lim;
3127
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003128 /* Make sure the IOMMU PC resource is available */
3129 if (!amd_iommu_pc_present)
3130 return -ENODEV;
3131
Steven L Kinney30861dd2013-06-05 16:11:48 -05003132 /* Check for valid iommu and pc register indexing */
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003133 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
Steven L Kinney30861dd2013-06-05 16:11:48 -05003134 return -ENODEV;
3135
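	/*
	 * MMIO layout of the counter registers: the bank selects bits 19:12
	 * (based at 0x40000), the counter index bits 11:8, and fxn the
	 * register offset within that counter.
	 */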
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003136 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003137
3138	/* Limit the offset to the hw-defined MMIO region aperture */
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003139 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
Steven L Kinney30861dd2013-06-05 16:11:48 -05003140 (iommu->max_counters << 8) | 0x28);
3141 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3142 (offset > max_offset_lim))
3143 return -EINVAL;
3144
3145 if (is_write) {
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003146 u64 val = *value & GENMASK_ULL(47, 0);
3147
3148 writel((u32)val, iommu->mmio_base + offset);
3149 writel((val >> 32), iommu->mmio_base + offset + 4);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003150 } else {
3151 *value = readl(iommu->mmio_base + offset + 4);
3152 *value <<= 32;
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003153 *value |= readl(iommu->mmio_base + offset);
3154 *value &= GENMASK_ULL(47, 0);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003155 }
3156
3157 return 0;
3158}
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003159
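/*
 * Accessors for the performance counter registers: callers (e.g. the perf
 * AMD IOMMU PMU) look up an IOMMU with get_amd_iommu() and read or write
 * 48-bit counter values through these helpers.
 */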
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003160int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003161{
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003162 if (!iommu)
3163 return -EINVAL;
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003164
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003165 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003166}
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003167EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3168
3169int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3170{
3171 if (!iommu)
3172 return -EINVAL;
3173
3174 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3175}
3176EXPORT_SYMBOL(amd_iommu_pc_set_reg);