// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: "   fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED    0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48
#define IVHD_DEV_ACPI_HID               0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_IW                    0x04
#define IVMD_FLAG_IR                    0x02
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT                    100000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;
/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 efr_attr;

        /* Following only valid on IVHD type 11h and 40h */
        u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
        u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
        u32 hidh;
        u64 cid;
        u8 uidf;
        u8 uidl;
        u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;
/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the content of the old device
 * table will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

enum iommu_init_state {
        IOMMU_START_STATE,
        IOMMU_IVRS_DETECTED,
        IOMMU_ACPI_FINISHED,
        IOMMU_ENABLED,
        IOMMU_PCI_INIT,
        IOMMU_INTERRUPTS_EN,
        IOMMU_DMA_OPS,
        IOMMU_INITIALIZED,
        IOMMU_NOT_FOUND,
        IOMMU_INIT_ERROR,
        IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE          4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
        return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
        iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        if (ctrl & (1<<CONTROL_IOMMU_EN))
                iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

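/*
 * The per-device-id tables are sized as the smallest power-of-two number
 * of pages that can hold (amd_iommu_last_bdf + 1) entries of the given
 * entry size.
 */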
static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
        return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

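/*
 * The L1/L2 register spaces are reached indirectly through PCI config
 * space: an index (plus a write-enable bit for stores) goes into one
 * config register and the data moves through its companion register
 * (0xf8/0xfc for L1, 0xf0/0xf4 for L2).
 */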
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                        &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                        &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = iommu_virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                        &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

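/* Programs the invalidation timeout field in the IOMMU control register */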
static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~CTRL_INV_TO_MASK;
        ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        if (!iommu->mmio_base)
                return;

        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU GA_LOG */
        iommu_feature_disable(iommu, CONTROL_GALOG_EN);
        iommu_feature_disable(iommu, CONTROL_GAINT_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
        if (!request_mem_region(address, end, "amd_iommu")) {
                pr_err("Can not reserve memory region %llx-%llx for mmio\n",
                        address, end);
                pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

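/*
 * Returns the fixed header length of an IVHD block: 24 bytes for the
 * legacy type 0x10, 40 bytes for types 0x11 and 0x40, and 0 for types
 * this driver does not understand.
 */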
static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
        u32 size = 0;

        switch (h->type) {
        case 0x10:
                size = 24;
                break;
        case 0x11:
        case 0x40:
                size = 40;
                break;
        }
        return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        u32 type = ((struct ivhd_entry *)ivhd)->type;

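        /*
         * For the 4/8/16/32 byte entry types the length is encoded in
         * bits 7:6 of the type byte itself.
         */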
        if (type < 0x80) {
                return 0x04 << (*ivhd >> 6);
        } else if (type == IVHD_DEV_ACPI_HID) {
                /* For ACPI_HID, offset 21 is uid len */
                return *((u8 *)ivhd + 21) + 22;
        }
        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether there is a higher device id defined in the
 * ACPI table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        u32 ivhd_size = get_ivhd_header_size(h);

        if (!ivhd_size) {
                pr_err("Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;
        end += h->length;

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_ALL:
                        /* Use maximum BDF value for DEV_ALL */
                        update_last_devid(0xffff);
                        break;
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table;

        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                pr_err(FW_BUG "IVRS invalid checksum\n");
                return -ENODEV;
        }

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                if (h->type == amd_iommu_target_ivhd_type) {
                        int ret = find_last_devid_from_ivhd(h);

                        if (ret)
                                return ret;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
        iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(CMD_BUFFER_SIZE));

        return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        iommu->cmd_buf_head = 0;
        iommu->cmd_buf_tail = 0;

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = iommu_virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(EVT_BUFFER_SIZE));

        return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
        iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(PPR_LOG_SIZE));

        return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (iommu->ppr_log == NULL)
                return;

        entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
        free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        u32 status, i;

        if (!iommu->ga_log)
                return -EINVAL;

        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        /* Check if already running */
        if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                return 0;

        iommu_feature_enable(iommu, CONTROL_GAINT_EN);
        iommu_feature_enable(iommu, CONTROL_GALOG_EN);

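        /* Wait for the hardware to report the GA log as running */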
        for (i = 0; i < LOOP_TIMEOUT; ++i) {
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                        break;
        }

        if (i >= LOOP_TIMEOUT)
                return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
        return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                return 0;

        iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(GA_LOG_SIZE));
        if (!iommu->ga_log)
                goto err_out;

        iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(8));
        if (!iommu->ga_log_tail)
                goto err_out;

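        /* Program the GA log base (with size encoding) and tail pointer registers */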
        entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
                    &entry, sizeof(entry));
        entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
                 (BIT_ULL(52)-1)) & ~7ULL;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
                    &entry, sizeof(entry));
        writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

        return 0;
err_out:
        free_ga_log(iommu);
        return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
        int ret = 0;

#ifdef CONFIG_IRQ_REMAP
        /* Note: We have already checked GASup from IVRS table.
         * Now, we need to make sure that GAMSup is set.
         */
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
            !iommu_feature(iommu, FEATURE_GAM_VAPIC))
                amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

        ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

        return ret;
}

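/*
 * Allocate a zeroed page used by this IOMMU as the write-back target for
 * completion-wait commands (the "cwwb" semaphore).
 */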
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
        iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);

        return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
        if (iommu->cmd_sem)
                free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        /*
         * XT mode (32-bit APIC destination ID) requires
         * GA mode (128-bit IRTE support) as a prerequisite.
         */
        if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
            amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
                iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
        if (!iommu_feature(iommu, FEATURE_GT))
                return;

        iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
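/* A DTE is 256 bits wide: bits 7:6 of 'bit' select the u64 word, bits 5:0 the bit within it. */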
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

static bool copy_device_table(void)
{
        u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
        struct dev_table_entry *old_devtb = NULL;
        u32 lo, hi, devid, old_devtb_size;
        phys_addr_t old_devtb_phys;
        struct amd_iommu *iommu;
        u16 dom_id, dte_v, irq_v;
        gfp_t gfp_flag;
        u64 tmp;

        if (!amd_iommu_pre_enabled)
                return false;

        pr_warn("Translation is already enabled - trying to copy translation structures\n");
        for_each_iommu(iommu) {
                /* All IOMMUs should use the same device table with the same size */
                lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
                hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
                entry = (((u64) hi) << 32) + lo;
                if (last_entry && last_entry != entry) {
                        pr_err("IOMMU:%d should use the same dev table as others!\n",
                                iommu->index);
                        return false;
                }
                last_entry = entry;

                old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
                if (old_devtb_size != dev_table_size) {
                        pr_err("The device table size of IOMMU:%d is not expected!\n",
                                iommu->index);
                        return false;
                }
        }

        /*
         * When SME is enabled in the first kernel, the entry includes the
         * memory encryption mask (sme_me_mask), so we must remove the mask
         * to obtain the true physical address in the kdump kernel.
         */
        old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

        if (old_devtb_phys >= 0x100000000ULL) {
                pr_err("The address of old device table is above 4G, not trustworthy!\n");
                return false;
        }
        old_devtb = (sme_active() && is_kdump_kernel())
                    ? (__force void *)ioremap_encrypted(old_devtb_phys,
                                                        dev_table_size)
                    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

        if (!old_devtb)
                return false;

        gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
        old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
                                get_order(dev_table_size));
        if (old_dev_tbl_cpy == NULL) {
                pr_err("Failed to allocate memory for copying old device table!\n");
                return false;
        }

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                old_dev_tbl_cpy[devid] = old_devtb[devid];
                dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
                dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

                if (dte_v && dom_id) {
                        old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
                        old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
                        __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
                        /* If gcr3 table existed, mask it out */
                        if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
                                tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
                                tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
                                old_dev_tbl_cpy[devid].data[1] &= ~tmp;
                                tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
                                tmp |= DTE_FLAG_GV;
                                old_dev_tbl_cpy[devid].data[0] &= ~tmp;
                        }
                }

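                /* Carry over the interrupt remapping settings only if they look sane */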
                irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
                int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
                int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
                if (irq_v && (int_ctl || int_tab_len)) {
                        if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
                            (int_tab_len != DTE_IRQ_TABLE_LEN)) {
                                pr_err("Wrong old irq remapping flag: %#x\n", devid);
                                return false;
                        }

                        old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
                }
        }
        memunmap(old_devtb);

        return true;
}

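/*
 * Erratum 63 workaround (as the function name indicates): a DTE whose
 * SYSMGT field decodes to 01b must also have the IW bit set.
 */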
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
        struct devid_map *entry;
        struct list_head *list;

        if (type == IVHD_SPECIAL_IOAPIC)
                list = &ioapic_map;
        else if (type == IVHD_SPECIAL_HPET)
                list = &hpet_map;
        else
                return -EINVAL;

        list_for_each_entry(entry, list, list) {
                if (!(entry->id == id && entry->cmd_line))
                        continue;

                pr_info("Command-line override present for %s id %d - ignoring\n",
                        type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

                *devid = entry->devid;

                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->id       = id;
        entry->devid    = *devid;
        entry->cmd_line = cmd_line;

        list_add_tail(&entry->list, list);

        return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
                                      bool cmd_line)
{
        struct acpihid_map_entry *entry;
        struct list_head *list = &acpihid_map;

        list_for_each_entry(entry, list, list) {
                if (strcmp(entry->hid, hid) ||
                    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
                    !entry->cmd_line)
                        continue;

                pr_info("Command-line override for hid:%s uid:%s\n",
                        hid, uid);
                *devid = entry->devid;
                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        memcpy(entry->uid, uid, strlen(uid));
        memcpy(entry->hid, hid, strlen(hid));
        entry->devid = *devid;
        entry->cmd_line = cmd_line;
        entry->root_devid = (entry->devid & (~0x7));

        pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
                entry->cmd_line ? "cmd" : "ivrs",
                entry->hid, entry->uid, entry->root_devid);

        list_add_tail(&entry->list, list);
        return 0;
}

static int __init add_early_maps(void)
{
        int i, ret;

        for (i = 0; i < early_ioapic_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_IOAPIC,
                                         early_ioapic_map[i].id,
                                         &early_ioapic_map[i].devid,
                                         early_ioapic_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_hpet_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_HPET,
                                         early_hpet_map[i].id,
                                         &early_hpet_map[i].devid,
                                         early_hpet_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_acpihid_map_size; ++i) {
                ret = add_acpi_hid_device(early_acpihid_map[i].hid,
                                          early_acpihid_map[i].uid,
                                          &early_acpihid_map[i].devid,
                                          early_acpihid_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        /*
         * Treat per-device exclusion ranges as r/w unity-mapped regions
         * since some buggy BIOSes might lead to the overwritten exclusion
         * range (exclusion_start and exclusion_length members). This
         * happens when there are multiple exclusion ranges (IVMD entries)
         * defined in ACPI table.
         */
        m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                       struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;
        u32 ivhd_size;
        int ret;

        ret = add_early_maps();
        if (ret)
                return ret;

        amd_iommu_apply_ivrs_quirks();

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        ivhd_size = get_ivhd_header_size(h);
        if (!ivhd_size) {
                pr_err("Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;

        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

                        for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid,    e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                case IVHD_DEV_SPECIAL: {
                        u8 handle, type;
                        const char *var;
                        u16 devid;
                        int ret;

                        handle = e->ext & 0xff;
                        devid  = (e->ext >> 8) & 0xffff;
                        type   = (e->ext >> 24) & 0xff;

                        if (type == IVHD_SPECIAL_IOAPIC)
                                var = "IOAPIC";
                        else if (type == IVHD_SPECIAL_HPET)
                                var = "HPET";
                        else
                                var = "UNKNOWN";

                        DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
                                    var, (int)handle,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        ret = add_special_device(type, handle, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_special_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_special_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                case IVHD_DEV_ACPI_HID: {
                        u16 devid;
                        u8 hid[ACPIHID_HID_LEN];
                        u8 uid[ACPIHID_UID_LEN];
                        int ret;

                        if (h->type != 0x40) {
                                pr_err(FW_BUG "Invalid IVHD device type %#x\n",
                                       e->type);
                                break;
                        }

1346 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1347 hid[ACPIHID_HID_LEN - 1] = '\0';
1348
1349 if (!(*hid)) {
1350 pr_err(FW_BUG "Invalid HID.\n");
1351 break;
1352 }
1353
Alexander Monakove461b8c2020-05-11 10:23:52 +00001354 uid[0] = '\0';
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001355 switch (e->uidf) {
1356 case UID_NOT_PRESENT:
1357
1358 if (e->uidl != 0)
1359 pr_warn(FW_BUG "Invalid UID length.\n");
1360
1361 break;
1362 case UID_IS_INTEGER:
1363
1364 sprintf(uid, "%d", e->uid);
1365
1366 break;
1367 case UID_IS_CHARACTER:
1368
Alexander Monakove461b8c2020-05-11 10:23:52 +00001369 memcpy(uid, &e->uid, e->uidl);
1370 uid[e->uidl] = '\0';
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001371
1372 break;
1373 default:
1374 break;
1375 }
1376
Nicolas Iooss6082ee72016-06-26 10:33:29 +02001377 devid = e->devid;
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001378 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1379 hid, uid,
1380 PCI_BUS_NUM(devid),
1381 PCI_SLOT(devid),
1382 PCI_FUNC(devid));
1383
Wan Zongshun2a0cb4e2016-04-01 09:06:00 -04001384 flags = e->flags;
1385
1386 ret = add_acpi_hid_device(hid, uid, &devid, false);
1387 if (ret)
1388 return ret;
1389
1390 /*
1391 * add_acpi_hid_device might update the devid in case a
1392 * command-line override is present. So call
1393 * set_dev_entry_from_acpi after add_acpi_hid_device.
1394 */
1395 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1396
1397 break;
1398 }
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001399 default:
1400 break;
1401 }
1402
Joerg Roedelb514e552008-09-17 17:14:27 +02001403 p += ivhd_entry_length(p);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001404 }
Joerg Roedel6efed632012-06-14 15:52:58 +02001405
1406 return 0;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001407}
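
/*
 * Illustrative sketch (hypothetical helper, never called by the
 * driver): how the packed e->ext field of an IVHD_DEV_SPECIAL entry
 * decomposes, mirroring the parsing in init_iommu_from_acpi() above.
 */
static void __maybe_unused example_decode_ivhd_special_ext(u32 ext)
{
	u8 handle = ext & 0xff;			/* IOAPIC/HPET id */
	u16 sdevid = (ext >> 8) & 0xffff;	/* requestor PCI devid */
	u8 type = (ext >> 24) & 0xff;		/* IVHD_SPECIAL_* constant */

	pr_debug("handle=%u devid=%04x type=%u\n", handle, sdevid, type);
}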
1408
Joerg Roedele47d4022008-06-26 21:27:48 +02001409static void __init free_iommu_one(struct amd_iommu *iommu)
1410{
Suravee Suthikulpanitc69d89a2020-09-23 12:13:45 +00001411 free_cwwb_sem(iommu);
Joerg Roedele47d4022008-06-26 21:27:48 +02001412 free_command_buffer(iommu);
Joerg Roedel335503e2008-09-05 14:29:07 +02001413 free_event_buffer(iommu);
Joerg Roedel1a29ac02011-11-10 15:41:40 +01001414 free_ppr_log(iommu);
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001415 free_ga_log(iommu);
Joerg Roedele47d4022008-06-26 21:27:48 +02001416 iommu_unmap_mmio_space(iommu);
1417}
1418
1419static void __init free_iommu_all(void)
1420{
1421 struct amd_iommu *iommu, *next;
1422
Joerg Roedel3bd22172009-05-04 15:06:20 +02001423 for_each_iommu_safe(iommu, next) {
Joerg Roedele47d4022008-06-26 21:27:48 +02001424 list_del(&iommu->list);
1425 free_iommu_one(iommu);
1426 kfree(iommu);
1427 }
1428}
1429
Joerg Roedelb65233a2008-07-11 17:14:21 +02001430/*
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001431 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1432 * Workaround:
1433 * BIOS should disable L2B miscellaneous clock gating by setting
1434 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1435 */
Nikola Pajkovskye2f1a3b2013-02-26 16:12:05 +01001436static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001437{
1438 u32 value;
1439
1440 if ((boot_cpu_data.x86 != 0x15) ||
1441 (boot_cpu_data.x86_model < 0x10) ||
1442 (boot_cpu_data.x86_model > 0x1f))
1443 return;
1444
1445 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1446 pci_read_config_dword(iommu->dev, 0xf4, &value);
1447
1448 if (value & BIT(2))
1449 return;
1450
1451 /* Select NB indirect register 0x90 and enable writing */
1452 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1453
1454 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001455 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001456
1457 /* Clear the enable writing bit */
1458 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1459}
1460
1461/*
Jay Cornwall358875f2016-02-10 15:48:01 -06001462 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1463 * Workaround:
1464 * BIOS should enable ATS write permission check by setting
1465 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1466 */
1467static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1468{
1469 u32 value;
1470
1471 if ((boot_cpu_data.x86 != 0x15) ||
1472 (boot_cpu_data.x86_model < 0x30) ||
1473 (boot_cpu_data.x86_model > 0x3f))
1474 return;
1475
1476 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1477 value = iommu_read_l2(iommu, 0x47);
1478
1479 if (value & BIT(0))
1480 return;
1481
1482 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1483 iommu_write_l2(iommu, 0x47, value | BIT(0));
1484
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001485 pci_info(iommu->dev, "Applying ATS write check workaround\n");
Jay Cornwall358875f2016-02-10 15:48:01 -06001486}
1487
1488/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02001489 * This function ties the initialization of one IOMMU together. It
1490 * also allocates the command buffer and programs the
1491 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1492 */
Joerg Roedele47d4022008-06-26 21:27:48 +02001493static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1494{
Joerg Roedel6efed632012-06-14 15:52:58 +02001495 int ret;
1496
Scott Wood27790392018-01-21 03:28:54 -06001497 raw_spin_lock_init(&iommu->lock);
Suravee Suthikulpanitc69d89a2020-09-23 12:13:45 +00001498 iommu->cmd_sem_val = 0;
Joerg Roedelbb527772009-11-20 14:31:51 +01001499
1500 /* Add IOMMU to internal data structures */
Joerg Roedele47d4022008-06-26 21:27:48 +02001501 list_add_tail(&iommu->list, &amd_iommu_list);
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -06001502 iommu->index = amd_iommus_present++;
Joerg Roedelbb527772009-11-20 14:31:51 +01001503
1504 if (unlikely(iommu->index >= MAX_IOMMUS)) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001505 WARN(1, "System has more IOMMUs than supported by this driver\n");
Joerg Roedelbb527772009-11-20 14:31:51 +01001506 return -ENOSYS;
1507 }
1508
1509 /* Index is fine - add IOMMU to the array */
1510 amd_iommus[iommu->index] = iommu;
Joerg Roedele47d4022008-06-26 21:27:48 +02001511
1512 /*
1513 * Copy data from ACPI table entry to the iommu struct
1514 */
Joerg Roedel23c742d2012-06-12 11:47:34 +02001515 iommu->devid = h->devid;
Joerg Roedele47d4022008-06-26 21:27:48 +02001516 iommu->cap_ptr = h->cap_ptr;
Joerg Roedelee893c22008-09-08 14:48:04 +02001517 iommu->pci_seg = h->pci_seg;
Joerg Roedele47d4022008-06-26 21:27:48 +02001518 iommu->mmio_phys = h->mmio_phys;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001519
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001520 switch (h->type) {
1521 case 0x10:
1522 /* Check if IVHD EFR contains proper max banks/counters */
1523 if ((h->efr_attr != 0) &&
1524 ((h->efr_attr & (0xF << 13)) != 0) &&
1525 ((h->efr_attr & (0x3F << 17)) != 0))
1526 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1527 else
1528 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001529 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1530 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001531 break;
1532 case 0x11:
1533 case 0x40:
1534 if (h->efr_reg & (1 << 9))
1535 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1536 else
1537 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001538 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1539 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
Suravee Suthikulpanit81307142019-11-20 07:55:48 -06001540 /*
1541 * Note: Since iommu_update_intcapxt() leverages
1542 * the IOMMU MMIO access to MSI capability block registers
1543 * for MSI address lo/hi/data, we need to check both
1544 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
1545 */
1546 if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
1547 (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
1548 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001549 break;
1550 default:
1551 return -EINVAL;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001552 }
1553
1554 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1555 iommu->mmio_phys_end);
Joerg Roedele47d4022008-06-26 21:27:48 +02001556 if (!iommu->mmio_base)
1557 return -ENOMEM;
1558
Suravee Suthikulpanitc69d89a2020-09-23 12:13:45 +00001559 if (alloc_cwwb_sem(iommu))
1560 return -ENOMEM;
1561
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001562 if (alloc_command_buffer(iommu))
Joerg Roedele47d4022008-06-26 21:27:48 +02001563 return -ENOMEM;
1564
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001565 if (alloc_event_buffer(iommu))
Joerg Roedel335503e2008-09-05 14:29:07 +02001566 return -ENOMEM;
1567
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001568 iommu->int_enabled = false;
1569
Baoquan He4c232a72017-08-09 16:33:33 +08001570 init_translation_status(iommu);
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08001571 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1572 iommu_disable(iommu);
1573 clear_translation_pre_enabled(iommu);
1574 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1575 iommu->index);
1576 }
1577 if (amd_iommu_pre_enabled)
1578 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
Baoquan He4c232a72017-08-09 16:33:33 +08001579
Joerg Roedel6efed632012-06-14 15:52:58 +02001580 ret = init_iommu_from_acpi(iommu, h);
1581 if (ret)
1582 return ret;
Joerg Roedelf6fec002012-06-21 16:51:25 +02001583
Jiang Liu7c71d302015-04-13 14:11:33 +08001584 ret = amd_iommu_create_irq_domain(iommu);
1585 if (ret)
1586 return ret;
1587
Joerg Roedelf6fec002012-06-21 16:51:25 +02001588 /*
1589 * Make sure IOMMU is not considered to translate itself. The IVRS
1590 * table tells us so, but this is a lie!
1591 */
1592 amd_iommu_rlookup_table[iommu->devid] = NULL;
1593
Joerg Roedel23c742d2012-06-12 11:47:34 +02001594 return 0;
Joerg Roedele47d4022008-06-26 21:27:48 +02001595}
1596
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001597/**
1598 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
Krzysztof Kozlowski06ce8a62c2020-07-28 19:08:57 +02001599 * @ivrs: Pointer to the IVRS header
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001600 *
1601 * This function searches through all IVHDs and returns the highest supported IVHD type
1602 */
1603static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1604{
1605 u8 *base = (u8 *)ivrs;
1606 struct ivhd_header *ivhd = (struct ivhd_header *)
1607 (base + IVRS_HEADER_LENGTH);
1608 u8 last_type = ivhd->type;
1609 u16 devid = ivhd->devid;
1610
1611 while (((u8 *)ivhd - base < ivrs->length) &&
1612 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1613 u8 *p = (u8 *) ivhd;
1614
1615 if (ivhd->devid == devid)
1616 last_type = ivhd->type;
1617 ivhd = (struct ivhd_header *)(p + ivhd->length);
1618 }
1619
1620 return last_type;
1621}
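
/*
 * Minimal sketch (hypothetical helper, never called) of the IVHD walk
 * performed above: each header carries its own length, so the next
 * header starts ivhd->length bytes after the current one until the
 * IVRS table is exhausted.
 */
static void __maybe_unused example_walk_ivhds(struct acpi_table_header *ivrs)
{
	u8 *p = (u8 *)ivrs + IVRS_HEADER_LENGTH;
	u8 *end = (u8 *)ivrs + ivrs->length;

	while (p < end) {
		struct ivhd_header *ivhd = (struct ivhd_header *)p;

		pr_debug("IVHD type %#x length %u\n", ivhd->type, ivhd->length);
		p += ivhd->length;
	}
}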
1622
Joerg Roedelb65233a2008-07-11 17:14:21 +02001623/*
1624 * Iterates over all IOMMU entries in the ACPI table, allocates the
1625 * IOMMU structure and initializes it with init_iommu_one()
1626 */
Joerg Roedele47d4022008-06-26 21:27:48 +02001627static int __init init_iommu_all(struct acpi_table_header *table)
1628{
1629 u8 *p = (u8 *)table, *end = (u8 *)table;
1630 struct ivhd_header *h;
1631 struct amd_iommu *iommu;
1632 int ret;
1633
Joerg Roedele47d4022008-06-26 21:27:48 +02001634 end += table->length;
1635 p += IVRS_HEADER_LENGTH;
1636
1637 while (p < end) {
1638 h = (struct ivhd_header *)p;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001639 if (*p == amd_iommu_target_ivhd_type) {
Joerg Roedel9c720412009-05-20 13:53:57 +02001640
Joerg Roedelae908c22009-09-01 16:52:16 +02001641 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
Joerg Roedel9c720412009-05-20 13:53:57 +02001642 "seg: %d flags: %01x info %04x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001643 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
Joerg Roedel9c720412009-05-20 13:53:57 +02001644 PCI_FUNC(h->devid), h->cap_ptr,
1645 h->pci_seg, h->flags, h->info);
1646 DUMP_printk(" mmio-addr: %016llx\n",
1647 h->mmio_phys);
1648
Joerg Roedele47d4022008-06-26 21:27:48 +02001649 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02001650 if (iommu == NULL)
1651 return -ENOMEM;
Joerg Roedel3551a702010-03-01 13:52:19 +01001652
Joerg Roedele47d4022008-06-26 21:27:48 +02001653 ret = init_iommu_one(iommu, h);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02001654 if (ret)
1655 return ret;
Joerg Roedele47d4022008-06-26 21:27:48 +02001656 }
1657 p += h->length;
1658
1659 }
1660 WARN_ON(p != end);
1661
1662 return 0;
1663}
1664
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001665static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1666 u8 fxn, u64 *value, bool is_write);
Steven L Kinney30861dd2013-06-05 16:11:48 -05001667
1668static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1669{
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001670 struct pci_dev *pdev = iommu->dev;
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001671 u64 val = 0xabcd, val2 = 0, save_reg = 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001672
1673 if (!iommu_feature(iommu, FEATURE_PC))
1674 return;
1675
1676 amd_iommu_pc_present = true;
1677
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001678 /* save the value to restore, if writable */
1679 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1680 goto pc_false;
1681
Steven L Kinney30861dd2013-06-05 16:11:48 -05001682 /* Check if the performance counters can be written to */
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001683 if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1684 (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001685 (val != val2))
1686 goto pc_false;
1687
1688 /* restore */
1689 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1690 goto pc_false;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001691
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001692 pci_info(pdev, "IOMMU performance counters supported\n");
Steven L Kinney30861dd2013-06-05 16:11:48 -05001693
1694 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1695 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1696 iommu->max_counters = (u8) ((val >> 7) & 0xf);
Shuah Khan8c17bbf2020-01-23 15:32:14 -07001697
1698 return;
1699
1700pc_false:
1701 pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1702 amd_iommu_pc_present = false;
1703 return;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001704}
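
/*
 * Worked example for the MMIO_CNTR_CONF decoding above, using a
 * hypothetical register value:
 *
 *	val = 0x2400
 *	max_banks    = (0x2400 >> 12) & 0x3f = 2
 *	max_counters = (0x2400 >>  7) & 0xf  = 8
 */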
1705
Alex Williamson066f2e92014-06-12 16:12:37 -06001706static ssize_t amd_iommu_show_cap(struct device *dev,
1707 struct device_attribute *attr,
1708 char *buf)
1709{
Joerg Roedelb7a42b92017-02-28 13:57:18 +01001710 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
Alex Williamson066f2e92014-06-12 16:12:37 -06001711 return sprintf(buf, "%x\n", iommu->cap);
1712}
1713static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1714
1715static ssize_t amd_iommu_show_features(struct device *dev,
1716 struct device_attribute *attr,
1717 char *buf)
1718{
Joerg Roedelb7a42b92017-02-28 13:57:18 +01001719 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
Alex Williamson066f2e92014-06-12 16:12:37 -06001720 return sprintf(buf, "%llx\n", iommu->features);
1721}
1722static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1723
1724static struct attribute *amd_iommu_attrs[] = {
1725 &dev_attr_cap.attr,
1726 &dev_attr_features.attr,
1727 NULL,
1728};
1729
1730static struct attribute_group amd_iommu_group = {
1731 .name = "amd-iommu",
1732 .attrs = amd_iommu_attrs,
1733};
1734
1735static const struct attribute_group *amd_iommu_groups[] = {
1736 &amd_iommu_group,
1737 NULL,
1738};
Steven L Kinney30861dd2013-06-05 16:11:48 -05001739
Joerg Roedel24d2c522018-10-05 12:32:46 +02001740static int __init iommu_init_pci(struct amd_iommu *iommu)
Joerg Roedel23c742d2012-06-12 11:47:34 +02001741{
1742 int cap_ptr = iommu->cap_ptr;
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001743 int ret;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001744
Sinan Kayad5bf0f42017-12-19 00:37:47 -05001745 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1746 iommu->devid & 0xff);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001747 if (!iommu->dev)
1748 return -ENODEV;
1749
Jiang Liucbbc00b2015-10-09 22:07:31 +08001750 /* Prevent binding other PCI device drivers to IOMMU devices */
1751 iommu->dev->match_driver = false;
1752
Joerg Roedel23c742d2012-06-12 11:47:34 +02001753 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1754 &iommu->cap);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001755
Joerg Roedel23c742d2012-06-12 11:47:34 +02001756 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1757 amd_iommu_iotlb_sup = false;
1758
1759 /* read extended feature bits */
Adrian Huang62dcee72020-01-09 11:02:50 +08001760 iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001761
1762 if (iommu_feature(iommu, FEATURE_GT)) {
1763 int glxval;
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001764 u32 max_pasid;
1765 u64 pasmax;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001766
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001767 pasmax = iommu->features & FEATURE_PASID_MASK;
1768 pasmax >>= FEATURE_PASID_SHIFT;
1769 max_pasid = (1 << (pasmax + 1)) - 1;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001770
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001771 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1772
1773 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001774
1775 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1776 glxval >>= FEATURE_GLXVAL_SHIFT;
1777
1778 if (amd_iommu_max_glx_val == -1)
1779 amd_iommu_max_glx_val = glxval;
1780 else
1781 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1782 }
1783
1784 if (iommu_feature(iommu, FEATURE_GT) &&
1785 iommu_feature(iommu, FEATURE_PPR)) {
1786 iommu->is_iommu_v2 = true;
1787 amd_iommu_v2_present = true;
1788 }
1789
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001790 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1791 return -ENOMEM;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001792
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001793 ret = iommu_init_ga(iommu);
1794 if (ret)
1795 return ret;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001796
Joerg Roedel23c742d2012-06-12 11:47:34 +02001797 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1798 amd_iommu_np_cache = true;
1799
Steven L Kinney30861dd2013-06-05 16:11:48 -05001800 init_iommu_perf_ctr(iommu);
1801
Joerg Roedel23c742d2012-06-12 11:47:34 +02001802 if (is_rd890_iommu(iommu->dev)) {
1803 int i, j;
1804
Sinan Kayad5bf0f42017-12-19 00:37:47 -05001805 iommu->root_pdev =
1806 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1807 PCI_DEVFN(0, 0));
Joerg Roedel23c742d2012-06-12 11:47:34 +02001808
1809 /*
1810 * Some rd890 systems may not be fully reconfigured by the
1811 * BIOS, so it's necessary for us to store this information so
1812 * it can be reprogrammed on resume
1813 */
1814 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1815 &iommu->stored_addr_lo);
1816 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1817 &iommu->stored_addr_hi);
1818
1819 /* Low bit locks writes to configuration space */
1820 iommu->stored_addr_lo &= ~1;
1821
1822 for (i = 0; i < 6; i++)
1823 for (j = 0; j < 0x12; j++)
1824 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1825
1826 for (i = 0; i < 0x83; i++)
1827 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1828 }
1829
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001830 amd_iommu_erratum_746_workaround(iommu);
Jay Cornwall358875f2016-02-10 15:48:01 -06001831 amd_iommu_ats_write_check_workaround(iommu);
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001832
Joerg Roedel39ab9552017-02-01 16:56:46 +01001833 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1834 amd_iommu_groups, "ivhd%d", iommu->index);
Joerg Roedelb0119e82017-02-01 13:23:08 +01001835 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1836 iommu_device_register(&iommu->iommu);
Alex Williamson066f2e92014-06-12 16:12:37 -06001837
Joerg Roedel23c742d2012-06-12 11:47:34 +02001838 return pci_enable_device(iommu->dev);
1839}
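
/*
 * Worked example for the EFR-based PASID sizing in iommu_init_pci()
 * above, assuming a hypothetical PASmax field value of 0xf:
 *
 *	max_pasid = (1 << (0xf + 1)) - 1 = 0xffff
 *
 * i.e. such a device would support PASIDs 0 through 0xffff.
 */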
1840
Joerg Roedel4d121c32012-06-14 12:21:55 +02001841static void print_iommu_info(void)
1842{
1843 static const char * const feat_str[] = {
1844 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1845 "IA", "GA", "HE", "PC"
1846 };
1847 struct amd_iommu *iommu;
1848
1849 for_each_iommu(iommu) {
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001850 struct pci_dev *pdev = iommu->dev;
Joerg Roedel4d121c32012-06-14 12:21:55 +02001851 int i;
1852
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001853 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
Joerg Roedel4d121c32012-06-14 12:21:55 +02001854
1855 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
Paul Menzel9a295ff2020-06-17 00:04:20 +02001856 pci_info(pdev, "Extended features (%#llx):",
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001857 iommu->features);
Joerg Roedel2bd5ed02012-08-10 11:34:08 +02001858 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
Joerg Roedel4d121c32012-06-14 12:21:55 +02001859 if (iommu_feature(iommu, (1ULL << i)))
1860 pr_cont(" %s", feat_str[i]);
1861 }
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001862
1863 if (iommu->features & FEATURE_GAM_VAPIC)
1864 pr_cont(" GA_vAPIC");
1865
Steven L Kinney30861dd2013-06-05 16:11:48 -05001866 pr_cont("\n");
Borislav Petkov500c25e2012-09-28 16:22:26 +02001867 }
Joerg Roedel4d121c32012-06-14 12:21:55 +02001868 }
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001869 if (irq_remapping_enabled) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001870 pr_info("Interrupt remapping enabled\n");
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001871 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
Joerg Roedel101fa032018-11-27 16:22:31 +01001872 pr_info("Virtual APIC enabled\n");
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05001873 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
Joerg Roedel101fa032018-11-27 16:22:31 +01001874 pr_info("X2APIC enabled\n");
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001875 }
Joerg Roedel4d121c32012-06-14 12:21:55 +02001876}
1877
Joerg Roedel2c0ae172012-06-12 15:59:30 +02001878static int __init amd_iommu_init_pci(void)
Joerg Roedel23c742d2012-06-12 11:47:34 +02001879{
1880 struct amd_iommu *iommu;
1881 int ret = 0;
1882
1883 for_each_iommu(iommu) {
1884 ret = iommu_init_pci(iommu);
1885 if (ret)
1886 break;
1887 }
1888
Joerg Roedel522e5cb72016-07-01 16:42:55 +02001889 /*
1890 * Order is important here to make sure any unity map requirements are
1891 * fulfilled. The unity mappings are created and written to the device
1892 * table during the amd_iommu_init_api() call.
1893 *
1894 * After that we call init_device_table_dma() to make sure any
1895 * uninitialized DTE will block DMA, and in the end we flush the caches
1896 * of all IOMMUs to make sure the changes to the device table are
1897 * active.
1898 */
1899 ret = amd_iommu_init_api();
1900
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001901 init_device_table_dma();
Joerg Roedel23c742d2012-06-12 11:47:34 +02001902
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001903 for_each_iommu(iommu)
1904 iommu_flush_all_caches(iommu);
1905
Joerg Roedel3a18404c2015-05-28 18:41:45 +02001906 if (!ret)
1907 print_iommu_info();
Joerg Roedel4d121c32012-06-14 12:21:55 +02001908
Joerg Roedel23c742d2012-06-12 11:47:34 +02001909 return ret;
1910}
1911
Joerg Roedelb65233a2008-07-11 17:14:21 +02001912/****************************************************************************
1913 *
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001914 * The following functions initialize the MSI interrupts for all IOMMUs
Frank Arnolddf805ab2012-08-27 19:21:04 +02001915 * in the system. It's a bit challenging because there could be multiple
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001916 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1917 * pci_dev.
1918 *
1919 ****************************************************************************/
1920
Joerg Roedel9f800de2009-11-23 12:45:25 +01001921static int iommu_setup_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001922{
1923 int r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001924
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001925 r = pci_enable_msi(iommu->dev);
1926 if (r)
1927 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001928
Joerg Roedel72fe00f2011-05-10 10:50:42 +02001929 r = request_threaded_irq(iommu->dev->irq,
1930 amd_iommu_int_handler,
1931 amd_iommu_int_thread,
1932 0, "AMD-Vi",
Suravee Suthikulpanit3f398bc2013-04-22 16:32:34 -05001933 iommu);
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001934
1935 if (r) {
1936 pci_disable_msi(iommu->dev);
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001937 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001938 }
1939
Joerg Roedelfab6afa2009-05-04 18:46:34 +02001940 iommu->int_enabled = true;
Joerg Roedel1a29ac02011-11-10 15:41:40 +01001941
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001942 return 0;
1943}
1944
Suthikulpanit, Suravee66929812019-07-16 04:29:16 +00001945#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
1946#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
1947#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
1948#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
1949
Krzysztof Kozlowski06ce8a62c2020-07-28 19:08:57 +02001950/*
Suthikulpanit, Suravee66929812019-07-16 04:29:16 +00001951 * Set up the IntCapXT registers with interrupt routing information
1952 * based on the PCI MSI capability block registers, accessed via
1953 * MMIO MSI address low/hi and MSI data registers.
1954 */
1955static void iommu_update_intcapxt(struct amd_iommu *iommu)
1956{
1957 u64 val;
1958 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
1959 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
1960 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
1961 bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
1962 u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
1963
1964 if (x2apic_enabled())
1965 dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
1966
1967 val = XT_INT_VEC(data & 0xFF) |
1968 XT_INT_DEST_MODE(dm) |
1969 XT_INT_DEST_LO(dest) |
1970 XT_INT_DEST_HI(dest);
1971
1972 /*
1973 * The current IOMMU implementation uses the same IRQ for all
1974 * 3 IOMMU interrupts.
1975 */
1976 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
1977 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
1978 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
1979}
1980
1981static void _irq_notifier_notify(struct irq_affinity_notify *notify,
1982 const cpumask_t *mask)
1983{
1984 struct amd_iommu *iommu;
1985
1986 for_each_iommu(iommu) {
1987 if (iommu->dev->irq == notify->irq) {
1988 iommu_update_intcapxt(iommu);
1989 break;
1990 }
1991 }
1992}
1993
1994static void _irq_notifier_release(struct kref *ref)
1995{
1996}
1997
1998static int iommu_init_intcapxt(struct amd_iommu *iommu)
1999{
2000 int ret;
2001 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
2002
2003 /*
Suravee Suthikulpanit81307142019-11-20 07:55:48 -06002004 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
2005 * which can be inferred from amd_iommu_xt_mode.
Suthikulpanit, Suravee66929812019-07-16 04:29:16 +00002006 */
2007 if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
2008 return 0;
2009
2010 /*
2011 * Also, we need to set up a notifier to update the IntCapXT registers
2012 * whenever the irq affinity is changed from user-space.
2013 */
2014 notify->irq = iommu->dev->irq;
2015 notify->notify = _irq_notifier_notify;
2016 notify->release = _irq_notifier_release;
2017 ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
2018 if (ret) {
2019 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2020 iommu->devid, iommu->dev->irq);
2021 return ret;
2022 }
2023
2024 iommu_update_intcapxt(iommu);
2025 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2026 return ret;
2027}
2028
Joerg Roedel05f92db2009-05-12 09:52:46 +02002029static int iommu_init_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002030{
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002031 int ret;
2032
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002033 if (iommu->int_enabled)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002034 goto enable_faults;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002035
Yijing Wang82fcfc62013-08-08 21:12:36 +08002036 if (iommu->dev->msi_cap)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002037 ret = iommu_setup_msi(iommu);
2038 else
2039 ret = -ENODEV;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002040
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002041 if (ret)
2042 return ret;
2043
2044enable_faults:
Suthikulpanit, Suravee66929812019-07-16 04:29:16 +00002045 ret = iommu_init_intcapxt(iommu);
2046 if (ret)
2047 return ret;
2048
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002049 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2050
2051 if (iommu->ppr_log != NULL)
Adrian Huangbde9e6b2019-12-30 13:56:54 +08002052 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002053
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05002054 iommu_ga_log_enable(iommu);
2055
Joerg Roedel9ddd5922012-03-15 16:29:47 +01002056 return 0;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02002057}
2058
2059/****************************************************************************
2060 *
Joerg Roedelb65233a2008-07-11 17:14:21 +02002061 * The next functions belong to the third pass of parsing the ACPI
2062 * table. In this last pass the memory mapping requirements are
Frank Arnolddf805ab2012-08-27 19:21:04 +02002063 * gathered (like exclusion and unity mapping ranges).
Joerg Roedelb65233a2008-07-11 17:14:21 +02002064 *
2065 ****************************************************************************/
2066
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002067static void __init free_unity_maps(void)
2068{
2069 struct unity_map_entry *entry, *next;
2070
2071 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2072 list_del(&entry->list);
2073 kfree(entry);
2074 }
2075}
2076
Joerg Roedelb65233a2008-07-11 17:14:21 +02002077/* called when we find an exclusion range definition in ACPI */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002078static int __init init_exclusion_range(struct ivmd_header *m)
2079{
2080 int i;
2081
2082 switch (m->type) {
2083 case ACPI_IVMD_TYPE:
2084 set_device_exclusion_range(m->devid, m);
2085 break;
2086 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel3a61ec32008-07-25 13:07:50 +02002087 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002088 set_device_exclusion_range(i, m);
2089 break;
2090 case ACPI_IVMD_TYPE_RANGE:
2091 for (i = m->devid; i <= m->aux; ++i)
2092 set_device_exclusion_range(i, m);
2093 break;
2094 default:
2095 break;
2096 }
2097
2098 return 0;
2099}
2100
Joerg Roedelb65233a2008-07-11 17:14:21 +02002101/* called for unity map ACPI definition */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002102static int __init init_unity_map_range(struct ivmd_header *m)
2103{
Joerg Roedel98f1ad22012-07-06 13:28:37 +02002104 struct unity_map_entry *e = NULL;
Joerg Roedel02acc432009-05-20 16:24:21 +02002105 char *s;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002106
2107 e = kzalloc(sizeof(*e), GFP_KERNEL);
2108 if (e == NULL)
2109 return -ENOMEM;
2110
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002111 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2112 init_exclusion_range(m);
2113
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002114 switch (m->type) {
2115 default:
Joerg Roedel0bc252f2009-05-22 12:48:05 +02002116 kfree(e);
2117 return 0;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002118 case ACPI_IVMD_TYPE:
Joerg Roedel02acc432009-05-20 16:24:21 +02002119 s = "IVMD_TYPE\t\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002120 e->devid_start = e->devid_end = m->devid;
2121 break;
2122 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel02acc432009-05-20 16:24:21 +02002123 s = "IVMD_TYPE_ALL\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002124 e->devid_start = 0;
2125 e->devid_end = amd_iommu_last_bdf;
2126 break;
2127 case ACPI_IVMD_TYPE_RANGE:
Joerg Roedel02acc432009-05-20 16:24:21 +02002128 s = "IVMD_TYPE_RANGE\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002129 e->devid_start = m->devid;
2130 e->devid_end = m->aux;
2131 break;
2132 }
2133 e->address_start = PAGE_ALIGN(m->range_start);
2134 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2135 e->prot = m->flags >> 1;
2136
Joerg Roedel02acc432009-05-20 16:24:21 +02002137 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2138 " range_start: %016llx range_end: %016llx flags: %x\n", s,
Shuah Khanc5081cd2013-02-27 17:07:19 -07002139 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2140 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
Joerg Roedel02acc432009-05-20 16:24:21 +02002141 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2142 e->address_start, e->address_end, m->flags);
2143
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002144 list_add_tail(&e->list, &amd_iommu_unity_map);
2145
2146 return 0;
2147}
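
/*
 * Worked example for the IVMD parsing above (hypothetical entry): an
 * ACPI_IVMD_TYPE_RANGE with devid 0x0008, aux 0x000f, range_start
 * 0x100000 and range_length 0x2000 yields a unity mapping for devices
 * 00:01.0 through 00:01.7 covering 0x100000-0x102000, with the
 * protection bits taken from flags >> 1.
 */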
2148
Joerg Roedelb65233a2008-07-11 17:14:21 +02002149/* iterates over all memory definitions we find in the ACPI table */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002150static int __init init_memory_definitions(struct acpi_table_header *table)
2151{
2152 u8 *p = (u8 *)table, *end = (u8 *)table;
2153 struct ivmd_header *m;
2154
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002155 end += table->length;
2156 p += IVRS_HEADER_LENGTH;
2157
2158 while (p < end) {
2159 m = (struct ivmd_header *)p;
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002160 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002161 init_unity_map_range(m);
2162
2163 p += m->length;
2164 }
2165
2166 return 0;
2167}
2168
Joerg Roedelb65233a2008-07-11 17:14:21 +02002169/*
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002170 * Init the device table to not allow DMA access for devices
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002171 */
Joerg Roedel33f28c52012-06-15 18:03:31 +02002172static void init_device_table_dma(void)
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002173{
Joerg Roedel0de66d52011-06-06 16:04:02 +02002174 u32 devid;
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002175
2176 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2177 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2178 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002179 }
2180}
2181
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002182static void __init uninit_device_table_dma(void)
2183{
2184 u32 devid;
2185
2186 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2187 amd_iommu_dev_table[devid].data[0] = 0ULL;
2188 amd_iommu_dev_table[devid].data[1] = 0ULL;
2189 }
2190}
2191
Joerg Roedel33f28c52012-06-15 18:03:31 +02002192static void init_device_table(void)
2193{
2194 u32 devid;
2195
2196 if (!amd_iommu_irq_remap)
2197 return;
2198
2199 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2200 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2201}
2202
Joerg Roedele9bf5192010-09-20 14:33:07 +02002203static void iommu_init_flags(struct amd_iommu *iommu)
2204{
2205 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2206 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2207 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2208
2209 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2210 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2211 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2212
2213 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2214 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2215 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2216
2217 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2218 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2219 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2220
2221 /*
2222 * make IOMMU memory accesses cache coherent
2223 */
2224 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
Joerg Roedel1456e9d2011-12-22 14:51:53 +01002225
2226 /* Set IOTLB invalidation timeout to 1s */
2227 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
Joerg Roedele9bf5192010-09-20 14:33:07 +02002228}
2229
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002230static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
Joerg Roedel4c894f42010-09-23 15:15:19 +02002231{
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002232 int i, j;
2233 u32 ioc_feature_control;
Joerg Roedelc1bf94e2012-05-31 17:38:11 +02002234 struct pci_dev *pdev = iommu->root_pdev;
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002235
2236 /* RD890 BIOSes may not have completely reconfigured the iommu */
Joerg Roedelc1bf94e2012-05-31 17:38:11 +02002237 if (!is_rd890_iommu(iommu->dev) || !pdev)
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002238 return;
2239
2240 /*
2241 * First, we need to ensure that the iommu is enabled. This is
2242 * controlled by a register in the northbridge
2243 */
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002244
2245 /* Select Northbridge indirect register 0x75 and enable writing */
2246 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2247 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2248
2249 /* Enable the iommu */
2250 if (!(ioc_feature_control & 0x1))
2251 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2252
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002253 /* Restore the iommu BAR */
2254 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2255 iommu->stored_addr_lo);
2256 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2257 iommu->stored_addr_hi);
2258
2259 /* Restore the l1 indirect regs for each of the 6 l1s */
2260 for (i = 0; i < 6; i++)
2261 for (j = 0; j < 0x12; j++)
2262 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2263
2264 /* Restore the l2 indirect regs */
2265 for (i = 0; i < 0x83; i++)
2266 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2267
2268 /* Lock PCI setup registers */
2269 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2270 iommu->stored_addr_lo | 1);
Joerg Roedel4c894f42010-09-23 15:15:19 +02002271}
2272
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002273static void iommu_enable_ga(struct amd_iommu *iommu)
2274{
2275#ifdef CONFIG_IRQ_REMAP
2276 switch (amd_iommu_guest_ir) {
2277 case AMD_IOMMU_GUEST_IR_VAPIC:
2278 iommu_feature_enable(iommu, CONTROL_GAM_EN);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002279 fallthrough;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002280 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2281 iommu_feature_enable(iommu, CONTROL_GA_EN);
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05002282 iommu->irte_ops = &irte_128_ops;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002283 break;
2284 default:
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05002285 iommu->irte_ops = &irte_32_ops;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002286 break;
2287 }
2288#endif
2289}
2290
Baoquan He78d313c2017-08-09 16:33:34 +08002291static void early_enable_iommu(struct amd_iommu *iommu)
2292{
2293 iommu_disable(iommu);
2294 iommu_init_flags(iommu);
2295 iommu_set_device_table(iommu);
2296 iommu_enable_command_buffer(iommu);
2297 iommu_enable_event_buffer(iommu);
2298 iommu_set_exclusion_range(iommu);
2299 iommu_enable_ga(iommu);
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002300 iommu_enable_xt(iommu);
Baoquan He78d313c2017-08-09 16:33:34 +08002301 iommu_enable(iommu);
2302 iommu_flush_all_caches(iommu);
2303}
2304
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002305/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02002306 * This function finally enables all IOMMUs found in the system after
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002307 * they have been initialized.
2308 *
2309 * Or, if running in a kdump kernel with the IOMMUs pre-enabled, try to
2310 * copy the old device table entries. If that is not the case, or if the
2311 * copy fails, just continue as a normal kernel would.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002312 */
Joerg Roedel11ee5ac2012-06-12 16:30:06 +02002313static void early_enable_iommus(void)
Joerg Roedel87361972008-06-26 21:28:07 +02002314{
2315 struct amd_iommu *iommu;
2316
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002317
2318 if (!copy_device_table()) {
2319 /*
2320 * If we get here, copying the device table from the old kernel
2321 * failed even though all IOMMUs were enabled. Print an error
2322 * message and free the allocated old_dev_tbl_cpy.
2323 */
2324 if (amd_iommu_pre_enabled)
2325 pr_err("Failed to copy DEV table from previous kernel.\n");
2326 if (old_dev_tbl_cpy != NULL)
2327 free_pages((unsigned long)old_dev_tbl_cpy,
2328 get_order(dev_table_size));
2329
2330 for_each_iommu(iommu) {
2331 clear_translation_pre_enabled(iommu);
2332 early_enable_iommu(iommu);
2333 }
2334 } else {
2335 pr_info("Copied DEV table from previous kernel.\n");
2336 free_pages((unsigned long)amd_iommu_dev_table,
2337 get_order(dev_table_size));
2338 amd_iommu_dev_table = old_dev_tbl_cpy;
2339 for_each_iommu(iommu) {
2340 iommu_disable_command_buffer(iommu);
2341 iommu_disable_event_buffer(iommu);
2342 iommu_enable_command_buffer(iommu);
2343 iommu_enable_event_buffer(iommu);
2344 iommu_enable_ga(iommu);
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002345 iommu_enable_xt(iommu);
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002346 iommu_set_device_table(iommu);
2347 iommu_flush_all_caches(iommu);
2348 }
Joerg Roedel87361972008-06-26 21:28:07 +02002349 }
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002350
2351#ifdef CONFIG_IRQ_REMAP
2352 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2353 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2354#endif
Joerg Roedel87361972008-06-26 21:28:07 +02002355}
2356
Joerg Roedel11ee5ac2012-06-12 16:30:06 +02002357static void enable_iommus_v2(void)
2358{
2359 struct amd_iommu *iommu;
2360
2361 for_each_iommu(iommu) {
2362 iommu_enable_ppr_log(iommu);
2363 iommu_enable_gt(iommu);
2364 }
2365}
2366
2367static void enable_iommus(void)
2368{
2369 early_enable_iommus();
2370
2371 enable_iommus_v2();
2372}
2373
Joerg Roedel92ac4322009-05-19 19:06:27 +02002374static void disable_iommus(void)
2375{
2376 struct amd_iommu *iommu;
2377
2378 for_each_iommu(iommu)
2379 iommu_disable(iommu);
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002380
2381#ifdef CONFIG_IRQ_REMAP
2382 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2383 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2384#endif
Joerg Roedel92ac4322009-05-19 19:06:27 +02002385}
2386
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002387/*
2388 * Suspend/Resume support
2389 * disable suspend until a real resume is implemented
2390 */
2391
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002392static void amd_iommu_resume(void)
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002393{
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002394 struct amd_iommu *iommu;
2395
2396 for_each_iommu(iommu)
2397 iommu_apply_resume_quirks(iommu);
2398
Joerg Roedel736501e2009-05-12 09:56:12 +02002399 /* re-load the hardware */
2400 enable_iommus();
Joerg Roedel3d9761e2012-03-15 16:39:21 +01002401
2402 amd_iommu_enable_interrupts();
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002403}
2404
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002405static int amd_iommu_suspend(void)
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002406{
Joerg Roedel736501e2009-05-12 09:56:12 +02002407 /* disable IOMMUs to go out of the way for BIOS */
2408 disable_iommus();
2409
2410 return 0;
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002411}
2412
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002413static struct syscore_ops amd_iommu_syscore_ops = {
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002414 .suspend = amd_iommu_suspend,
2415 .resume = amd_iommu_resume,
2416};
2417
Joerg Roedel90b3eb02017-06-16 16:09:55 +02002418static void __init free_iommu_resources(void)
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002419{
Lucas Stachebcfa282016-10-26 13:09:53 +02002420 kmemleak_free(irq_lookup_table);
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002421 free_pages((unsigned long)irq_lookup_table,
2422 get_order(rlookup_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002423 irq_lookup_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002424
Julia Lawalla5919892015-09-13 14:15:31 +02002425 kmem_cache_destroy(amd_iommu_irq_cache);
2426 amd_iommu_irq_cache = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002427
2428 free_pages((unsigned long)amd_iommu_rlookup_table,
2429 get_order(rlookup_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002430 amd_iommu_rlookup_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002431
2432 free_pages((unsigned long)amd_iommu_alias_table,
2433 get_order(alias_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002434 amd_iommu_alias_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002435
2436 free_pages((unsigned long)amd_iommu_dev_table,
2437 get_order(dev_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002438 amd_iommu_dev_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002439
2440 free_iommu_all();
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002441}
2442
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002443/* SB IOAPIC is always on this device in AMD systems */
2444#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2445
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002446static bool __init check_ioapic_information(void)
2447{
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002448 const char *fw_bug = FW_BUG;
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002449 bool ret, has_sb_ioapic;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002450 int idx;
2451
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002452 has_sb_ioapic = false;
2453 ret = false;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002454
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002455 /*
2456 * If we have map overrides on the kernel command line the
2457 * messages in this function might not describe firmware bugs
2458 * anymore - so be careful
2459 */
2460 if (cmdline_maps)
2461 fw_bug = "";
2462
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002463 for (idx = 0; idx < nr_ioapics; idx++) {
2464 int devid, id = mpc_ioapic_id(idx);
2465
2466 devid = get_ioapic_devid(id);
2467 if (devid < 0) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002468 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002469 fw_bug, id);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002470 ret = false;
2471 } else if (devid == IOAPIC_SB_DEVID) {
2472 has_sb_ioapic = true;
2473 ret = true;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002474 }
2475 }
2476
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002477 if (!has_sb_ioapic) {
2478 /*
2479 * We expect the SB IOAPIC to be listed in the IVRS
2480 * table. The system timer is connected to the SB IOAPIC
2481 * and if we don't have it in the list the system will
2482 * panic at boot time. This situation usually happens
2483 * when the BIOS is buggy and provides us the wrong
2484 * device id for the IOAPIC in the system.
2485 */
Joerg Roedel101fa032018-11-27 16:22:31 +01002486 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002487 }
2488
2489 if (!ret)
Joerg Roedel101fa032018-11-27 16:22:31 +01002490 pr_err("Disabling interrupt remapping\n");
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002491
2492 return ret;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002493}
2494
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002495static void __init free_dma_resources(void)
2496{
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002497 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2498 get_order(MAX_DOMAIN_ID/8));
Joerg Roedelf6019272017-06-16 16:09:58 +02002499 amd_iommu_pd_alloc_bitmap = NULL;
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002500
2501 free_unity_maps();
2502}
2503
Joerg Roedelb65233a2008-07-11 17:14:21 +02002504/*
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002505 * This is the hardware init function for AMD IOMMU in the system.
2506 * This function is called either from amd_iommu_init or from the interrupt
2507 * remapping setup code.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002508 *
2509 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002510 * four times:
Joerg Roedelb65233a2008-07-11 17:14:21 +02002511 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002512 * 1 pass) Discover the most comprehensive IVHD type to use.
2513 *
2514 * 2 pass) Find the highest PCI device id the driver has to handle.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002515 * Upon this information the size of the data structures is
2516 * determined that needs to be allocated.
2517 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002518 * 3 pass) Initialize the data structures just allocated with the
Joerg Roedelb65233a2008-07-11 17:14:21 +02002519 * information in the ACPI table about available AMD IOMMUs
2520 * in the system. It also maps the PCI devices in the
2521 * system to specific IOMMUs
2522 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002523 * 4 pass) After the basic data structures are allocated and
Joerg Roedelb65233a2008-07-11 17:14:21 +02002524 * initialized we update them with information about memory
2525 * remapping requirements parsed out of the ACPI table in
2526 * this last pass.
2527 *
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002528 * After everything is set up the IOMMUs are enabled and the necessary
2529 * hotplug and suspend notifiers are registered.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002530 */
Joerg Roedel643511b2012-06-12 12:09:35 +02002531static int __init early_amd_iommu_init(void)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002532{
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002533 struct acpi_table_header *ivrs_base;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002534 acpi_status status;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002535 int i, remap_cache_sz, ret = 0;
Kai-Heng Feng3dfee472020-02-10 15:51:15 +08002536 u32 pci_id;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002537
Joerg Roedel643511b2012-06-12 12:09:35 +02002538 if (!amd_iommu_detected)
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002539 return -ENODEV;
2540
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002541 status = acpi_get_table("IVRS", 0, &ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002542 if (status == AE_NOT_FOUND)
2543 return -ENODEV;
2544 else if (ACPI_FAILURE(status)) {
2545 const char *err = acpi_format_exception(status);
Joerg Roedel101fa032018-11-27 16:22:31 +01002546 pr_err("IVRS table error: %s\n", err);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002547 return -EINVAL;
2548 }
2549
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002550 /*
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002551 * Validate checksum here so we don't need to do it when
2552 * we actually parse the table
2553 */
2554 ret = check_ivrs_checksum(ivrs_base);
2555 if (ret)
Rafael J. Wysocki99e8ccd2017-01-10 14:57:28 +01002556 goto out;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002557
2558 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2559 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2560
2561 /*
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002562 * First parse ACPI tables to find the largest Bus/Dev/Func
2563 * we need to handle. Upon this information the shared data
2564 * structures for the IOMMUs in the system will be allocated
2565 */
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002566 ret = find_last_devid_acpi(ivrs_base);
2567 if (ret)
Joerg Roedel3551a702010-03-01 13:52:19 +01002568 goto out;
2569
Joerg Roedelc5714842008-07-11 17:14:25 +02002570 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2571 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2572 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002573
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002574 /* Device table - directly used by all IOMMUs */
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002575 ret = -ENOMEM;
Baoquan Heb3367812017-08-09 16:33:42 +08002576 amd_iommu_dev_table = (void *)__get_free_pages(
2577 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002578 get_order(dev_table_size));
2579 if (amd_iommu_dev_table == NULL)
2580 goto out;
2581
2582 /*
2583 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2584 * IOMMU see for that device
2585 */
2586 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2587 get_order(alias_table_size));
2588 if (amd_iommu_alias_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002589 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002590
2591 /* IOMMU rlookup table - find the IOMMU for a specific device */
Joerg Roedel83fd5cc2008-12-16 19:17:11 +01002592 amd_iommu_rlookup_table = (void *)__get_free_pages(
2593 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002594 get_order(rlookup_table_size));
2595 if (amd_iommu_rlookup_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002596 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002597
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002598 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2599 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002600 get_order(MAX_DOMAIN_ID/8));
2601 if (amd_iommu_pd_alloc_bitmap == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002602 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002603
2604 /*
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002605 * let all alias entries point to themselves
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002606 */
Joerg Roedel3a61ec32008-07-25 13:07:50 +02002607 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002608 amd_iommu_alias_table[i] = i;
2609
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002610 /*
2611 * never allocate domain 0 because it's used as the non-allocated and
2612 * error value placeholder
2613 */
Baoquan He5c87f622016-09-15 16:50:51 +08002614 __set_bit(0, amd_iommu_pd_alloc_bitmap);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002615
2616 /*
2617 * now the data structures are allocated and basically initialized
2618 * start the real acpi table scan
2619 */
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002620 ret = init_iommu_all(ivrs_base);
2621 if (ret)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002622 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002623
	/* Disable IOMMU if there's Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			amd_iommu_disabled = true;
			break;
		}
	}

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}

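/*
 * Set up MSI-based interrupts for every IOMMU in the system. This runs
 * from the IOMMU_PCI_INIT step of the state machine below, once PCI
 * initialization has completed.
 */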
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

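/*
 * Probe for an IVRS ACPI table without keeping a reference to it. On
 * success this also requests PCI ACS so device isolation is set up
 * before the PCI subsystem probes the bus.
 */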
static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/

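/*
 * Advance initialization by one step. The successful path is:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * A failure leads to one of the terminal states (IOMMU_NOT_FOUND,
 * IOMMU_INIT_ERROR, IOMMU_CMDLINE_DISABLED) and frees the resources
 * allocated so far.
 */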
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled\n");
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

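/*
 * Step the state machine forward until the requested state is reached
 * or a terminal error state stops further progress.
 */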
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}

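/*
 * Entry points used by the generic x86 interrupt remapping code; they
 * drive the same state machine as the DMA-layer initialization, just
 * from the irq-remapping setup and suspend/resume paths.
 */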
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}

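/*
 * With Secure Memory Encryption active on family 0x17 processors, the
 * IOMMU is only usable with sufficiently new microcode; outside the
 * checked patch levels the IOMMU must be left disabled.
 */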
static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether an IVRS ACPI table is present to detect
 * AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}

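/*
 * Override an IVRS HPET mapping on the kernel command line, e.g.
 * ivrs_hpet[0]=00:14.0 maps HPET number 0 to PCI device 00:14.0.
 */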
static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}

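/*
 * Override an IVRS ACPI HID mapping on the kernel command line, e.g.
 * ivrs_acpihid[00:14.5]=AMD0020:0 maps the HID/UID pair AMD0020:0 to
 * PCI device 00:14.5.
 */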
static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}

__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);

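/*
 * Register the detection routine with the x86 IOMMU init table;
 * gart_iommu_hole_init is listed as a dependency so AMD IOMMU detection
 * is ordered after the GART aperture setup, keeping the GART fallback
 * in amd_iommu_init() possible.
 */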
IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the HW-defined MMIO region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);