/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED    0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48
#define IVHD_DEV_ACPI_HID               0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT    100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 efr_attr;

        /* Following only valid on IVHD type 11h and 40h */
        u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
        u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
        u32 hidh;
        u64 cid;
        u8 uidf;
        u8 uidl;
        u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the contents of the old device
 * table will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

enum iommu_init_state {
        IOMMU_START_STATE,
        IOMMU_IVRS_DETECTED,
        IOMMU_ACPI_FINISHED,
        IOMMU_ENABLED,
        IOMMU_PCI_INIT,
        IOMMU_INTERRUPTS_EN,
        IOMMU_DMA_OPS,
        IOMMU_INITIALIZED,
        IOMMU_NOT_FOUND,
        IOMMU_INIT_ERROR,
        IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE          4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
        return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
        iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

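/*
 * Check whether translation was enabled before this kernel started (e.g. by
 * the firmware or by a previous kernel in a kdump scenario) and record that
 * in the per-IOMMU flags.
 */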
static void init_translation_status(struct amd_iommu *iommu)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        if (ctrl & (1<<CONTROL_IOMMU_EN))
                iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

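/*
 * Size in bytes of a table indexed by device id, rounded up to a whole
 * power-of-two number of pages. For example, assuming a 32-byte entry and
 * amd_iommu_last_bdf == 0xffff, get_order(0x10000 * 32) is 9, so the
 * returned size is 1UL << (PAGE_SHIFT + 9) == 2 MiB.
 */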
static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
        return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

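/*
 * The L1 and L2 register banks are not memory-mapped; they are accessed
 * indirectly through address/data register pairs in the IOMMU's PCI config
 * space (0xf8/0xfc for L1, 0xf0/0xf4 for L2). Setting bit 31 (L1) or bit 8
 * (L2) in the address register turns the access into a write.
 */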
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                        &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                        &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = iommu_virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                        &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

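/* Program the invalidation timeout field in the IOMMU control register */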
static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~CTRL_INV_TO_MASK;
        ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU GA_LOG */
        iommu_feature_disable(iommu, CONTROL_GALOG_EN);
        iommu_feature_disable(iommu, CONTROL_GAINT_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
        if (!request_mem_region(address, end, "amd_iommu")) {
                pr_err("Can not reserve memory region %llx-%llx for mmio\n",
                        address, end);
                pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
        u32 size = 0;

        switch (h->type) {
        case 0x10:
                size = 24;
                break;
        case 0x11:
        case 0x40:
                size = 40;
                break;
        }
        return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        u32 type = ((struct ivhd_entry *)ivhd)->type;

        if (type < 0x80) {
                return 0x04 << (*ivhd >> 6);
        } else if (type == IVHD_DEV_ACPI_HID) {
                /* For ACPI_HID, offset 21 is uid len */
                return *((u8 *)ivhd + 21) + 22;
        }
        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        u32 ivhd_size = get_ivhd_header_size(h);

        if (!ivhd_size) {
                pr_err("Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;
        end += h->length;

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_ALL:
                        /* Use maximum BDF value for DEV_ALL */
                        update_last_devid(0xffff);
                        break;
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

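/*
 * Verify the IVRS table checksum: all bytes of the ACPI table must sum to
 * zero (mod 256), otherwise the table is rejected as corrupt.
 */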
static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table;

        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                pr_err(FW_BUG "IVRS invalid checksum\n");
                return -ENODEV;
        }

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                if (h->type == amd_iommu_target_ivhd_type) {
                        int ret = find_last_devid_from_ivhd(h);

                        if (ret)
                                return ret;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
        iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(CMD_BUFFER_SIZE));

        return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        iommu->cmd_buf_head = 0;
        iommu->cmd_buf_tail = 0;

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = iommu_virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(EVT_BUFFER_SIZE));

        return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
        iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(PPR_LOG_SIZE));

        return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (iommu->ppr_log == NULL)
                return;

        entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
        if (iommu->ppr_log == NULL)
                return;

        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

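/* Free the GA (guest vAPIC) log buffer and its tail-pointer page, if allocated */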
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        if (iommu->ga_log)
                free_pages((unsigned long)iommu->ga_log,
                           get_order(GA_LOG_SIZE));
        if (iommu->ga_log_tail)
                free_pages((unsigned long)iommu->ga_log_tail,
                           get_order(8));
#endif
}

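/*
 * Enable the GA log and its interrupt, then poll the status register until
 * the hardware reports the log as running (bounded by LOOP_TIMEOUT).
 */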
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        u32 status, i;

        if (!iommu->ga_log)
                return -EINVAL;

        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        /* Check if already running */
        if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                return 0;

        iommu_feature_enable(iommu, CONTROL_GAINT_EN);
        iommu_feature_enable(iommu, CONTROL_GALOG_EN);

        for (i = 0; i < LOOP_TIMEOUT; ++i) {
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                        break;
        }

        if (i >= LOOP_TIMEOUT)
                return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
        return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                return 0;

        iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                               get_order(GA_LOG_SIZE));
        if (!iommu->ga_log)
                goto err_out;

        iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                    get_order(8));
        if (!iommu->ga_log_tail)
                goto err_out;

        entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
                    &entry, sizeof(entry));
        entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
                 (BIT_ULL(52)-1)) & ~7ULL;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
                    &entry, sizeof(entry));
        writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

        return 0;
err_out:
        free_ga_log(iommu);
        return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
        int ret = 0;

#ifdef CONFIG_IRQ_REMAP
        /* Note: We have already checked GASup from IVRS table.
         * Now, we need to make sure that GAMSup is set.
         */
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
            !iommu_feature(iommu, FEATURE_GAM_VAPIC))
                amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

        ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

        return ret;
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        /*
         * XT mode (32-bit APIC destination ID) requires
         * GA mode (128-bit IRTE support) as a prerequisite.
         */
        if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
            amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
                iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
        if (!iommu_feature(iommu, FEATURE_GT))
                return;

        iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}


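/*
 * In a kdump kernel the IOMMUs may still be using the device table of the
 * crashed kernel. Copy that table into old_dev_tbl_cpy (preserving domain
 * ids and interrupt-remapping data, but masking out GCR3/guest-paging
 * bits), or return false if its location or size looks untrustworthy.
 */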
static bool copy_device_table(void)
{
        u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
        struct dev_table_entry *old_devtb = NULL;
        u32 lo, hi, devid, old_devtb_size;
        phys_addr_t old_devtb_phys;
        struct amd_iommu *iommu;
        u16 dom_id, dte_v, irq_v;
        gfp_t gfp_flag;
        u64 tmp;

        if (!amd_iommu_pre_enabled)
                return false;

        pr_warn("Translation is already enabled - trying to copy translation structures\n");
        for_each_iommu(iommu) {
                /* All IOMMUs should use the same device table with the same size */
                lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
                hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
                entry = (((u64) hi) << 32) + lo;
                if (last_entry && last_entry != entry) {
                        pr_err("IOMMU:%d should use the same dev table as others!\n",
                                iommu->index);
                        return false;
                }
                last_entry = entry;

                old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
                if (old_devtb_size != dev_table_size) {
                        pr_err("The device table size of IOMMU:%d is not expected!\n",
                                iommu->index);
                        return false;
                }
        }

        /*
         * When SME is enabled in the first kernel, the entry includes the
         * memory encryption mask (sme_me_mask), we must remove the memory
         * encryption mask to obtain the true physical address in kdump kernel.
         */
        old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

        if (old_devtb_phys >= 0x100000000ULL) {
                pr_err("The address of old device table is above 4G, not trustworthy!\n");
                return false;
        }
        old_devtb = (sme_active() && is_kdump_kernel())
                    ? (__force void *)ioremap_encrypted(old_devtb_phys,
                                                        dev_table_size)
                    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

        if (!old_devtb)
                return false;

        gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
        old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
                                                   get_order(dev_table_size));
        if (old_dev_tbl_cpy == NULL) {
                pr_err("Failed to allocate memory for copying old device table!\n");
                return false;
        }

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                old_dev_tbl_cpy[devid] = old_devtb[devid];
                dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
                dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

                if (dte_v && dom_id) {
                        old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
                        old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
                        __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
                        /* If gcr3 table existed, mask it out */
                        if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
                                tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
                                tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
                                old_dev_tbl_cpy[devid].data[1] &= ~tmp;
                                tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
                                tmp |= DTE_FLAG_GV;
                                old_dev_tbl_cpy[devid].data[0] &= ~tmp;
                        }
                }

                irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
                int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
                int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
                if (irq_v && (int_ctl || int_tab_len)) {
                        if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
                            (int_tab_len != DTE_IRQ_TABLE_LEN)) {
                                pr_err("Wrong old irq remapping flag: %#x\n", devid);
                                return false;
                        }

                        old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
                }
        }
        memunmap(old_devtb);

        return true;
}

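/*
 * Erratum 63 workaround: if the SYSMGT field of the device table entry is
 * 01b (only SYSMGT1 set), also set the IW bit for that device.
 */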
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

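/*
 * Remember the devid of a special device (IOAPIC or HPET). If a command-line
 * override for the same id is already present, the IVRS-provided devid is
 * ignored and the override is handed back through *devid instead.
 */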
static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
        struct devid_map *entry;
        struct list_head *list;

        if (type == IVHD_SPECIAL_IOAPIC)
                list = &ioapic_map;
        else if (type == IVHD_SPECIAL_HPET)
                list = &hpet_map;
        else
                return -EINVAL;

        list_for_each_entry(entry, list, list) {
                if (!(entry->id == id && entry->cmd_line))
                        continue;

                pr_info("Command-line override present for %s id %d - ignoring\n",
                        type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

                *devid = entry->devid;

                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->id       = id;
        entry->devid    = *devid;
        entry->cmd_line = cmd_line;

        list_add_tail(&entry->list, list);

        return 0;
}

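/*
 * Same idea as add_special_device(), but for ACPI HID/UID described devices:
 * honour an existing command-line override, otherwise add a new entry to
 * acpihid_map.
 */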
static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
                                      bool cmd_line)
{
        struct acpihid_map_entry *entry;
        struct list_head *list = &acpihid_map;

        list_for_each_entry(entry, list, list) {
                if (strcmp(entry->hid, hid) ||
                    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
                    !entry->cmd_line)
                        continue;

                pr_info("Command-line override for hid:%s uid:%s\n",
                        hid, uid);
                *devid = entry->devid;
                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        memcpy(entry->uid, uid, strlen(uid));
        memcpy(entry->hid, hid, strlen(hid));
        entry->devid = *devid;
        entry->cmd_line = cmd_line;
        entry->root_devid = (entry->devid & (~0x7));

        pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
                entry->cmd_line ? "cmd" : "ivrs",
                entry->hid, entry->uid, entry->root_devid);

        list_add_tail(&entry->list, list);
        return 0;
}

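/*
 * Register the IOAPIC/HPET/ACPI-HID mappings collected from the kernel
 * command line before the IVHD device entries are parsed.
 */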
static int __init add_early_maps(void)
{
        int i, ret;

        for (i = 0; i < early_ioapic_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_IOAPIC,
                                         early_ioapic_map[i].id,
                                         &early_ioapic_map[i].devid,
                                         early_ioapic_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_hpet_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_HPET,
                                         early_hpet_map[i].id,
                                         &early_hpet_map[i].devid,
                                         early_hpet_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_acpihid_map_size; ++i) {
                ret = add_acpi_hid_device(early_acpihid_map[i].hid,
                                          early_acpihid_map[i].uid,
                                          &early_acpihid_map[i].devid,
                                          early_acpihid_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We can only configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here.
                 */
                set_dev_entry_bit(devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                       struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;
        u32 ivhd_size;
        int ret;


        ret = add_early_maps();
        if (ret)
                return ret;

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        ivhd_size = get_ivhd_header_size(h);
        if (!ivhd_size) {
                pr_err("Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;

        end += h->length;


        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

                        for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid,    e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                case IVHD_DEV_SPECIAL: {
                        u8 handle, type;
                        const char *var;
                        u16 devid;
                        int ret;

                        handle = e->ext & 0xff;
                        devid  = (e->ext >> 8) & 0xffff;
                        type   = (e->ext >> 24) & 0xff;

                        if (type == IVHD_SPECIAL_IOAPIC)
                                var = "IOAPIC";
                        else if (type == IVHD_SPECIAL_HPET)
                                var = "HPET";
                        else
                                var = "UNKNOWN";

                        DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
                                    var, (int)handle,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        ret = add_special_device(type, handle, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_special_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_special_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                case IVHD_DEV_ACPI_HID: {
                        u16 devid;
                        u8 hid[ACPIHID_HID_LEN] = {0};
                        u8 uid[ACPIHID_UID_LEN] = {0};
                        int ret;

                        if (h->type != 0x40) {
                                pr_err(FW_BUG "Invalid IVHD device type %#x\n",
                                       e->type);
                                break;
                        }

                        memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
                        hid[ACPIHID_HID_LEN - 1] = '\0';

                        if (!(*hid)) {
                                pr_err(FW_BUG "Invalid HID.\n");
                                break;
                        }

                        switch (e->uidf) {
                        case UID_NOT_PRESENT:

                                if (e->uidl != 0)
                                        pr_warn(FW_BUG "Invalid UID length.\n");

                                break;
                        case UID_IS_INTEGER:

                                sprintf(uid, "%d", e->uid);

                                break;
                        case UID_IS_CHARACTER:

                                memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
                                uid[ACPIHID_UID_LEN - 1] = '\0';

                                break;
                        default:
                                break;
                        }

                        devid = e->devid;
                        DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
                                    hid, uid,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        flags = e->flags;

                        ret = add_acpi_hid_device(hid, uid, &devid, false);
                        if (ret)
                                return ret;

1397 /*
 1398 * add_acpi_hid_device might update the devid in case a
 1399 * command-line override is present. So call
 1400 * set_dev_entry_from_acpi after add_acpi_hid_device.
1401 */
1402 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1403
1404 break;
1405 }
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001406 default:
1407 break;
1408 }
1409
Joerg Roedelb514e552008-09-17 17:14:27 +02001410 p += ivhd_entry_length(p);
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001411 }
Joerg Roedel6efed632012-06-14 15:52:58 +02001412
1413 return 0;
Joerg Roedel5d0c8e42008-06-26 21:27:47 +02001414}
1415
Joerg Roedele47d4022008-06-26 21:27:48 +02001416static void __init free_iommu_one(struct amd_iommu *iommu)
1417{
1418 free_command_buffer(iommu);
Joerg Roedel335503e2008-09-05 14:29:07 +02001419 free_event_buffer(iommu);
Joerg Roedel1a29ac02011-11-10 15:41:40 +01001420 free_ppr_log(iommu);
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001421 free_ga_log(iommu);
Joerg Roedele47d4022008-06-26 21:27:48 +02001422 iommu_unmap_mmio_space(iommu);
1423}
1424
1425static void __init free_iommu_all(void)
1426{
1427 struct amd_iommu *iommu, *next;
1428
Joerg Roedel3bd22172009-05-04 15:06:20 +02001429 for_each_iommu_safe(iommu, next) {
Joerg Roedele47d4022008-06-26 21:27:48 +02001430 list_del(&iommu->list);
1431 free_iommu_one(iommu);
1432 kfree(iommu);
1433 }
1434}
1435
Joerg Roedelb65233a2008-07-11 17:14:21 +02001436/*
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001437 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1438 * Workaround:
 1439 * BIOS should disable L2B miscellaneous clock gating by setting
1440 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1441 */
Nikola Pajkovskye2f1a3b2013-02-26 16:12:05 +01001442static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001443{
1444 u32 value;
1445
1446 if ((boot_cpu_data.x86 != 0x15) ||
1447 (boot_cpu_data.x86_model < 0x10) ||
1448 (boot_cpu_data.x86_model > 0x1f))
1449 return;
1450
1451 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1452 pci_read_config_dword(iommu->dev, 0xf4, &value);
1453
1454 if (value & BIT(2))
1455 return;
1456
1457 /* Select NB indirect register 0x90 and enable writing */
1458 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1459
1460 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001461 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001462
1463 /* Clear the enable writing bit */
1464 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1465}
1466
1467/*
Jay Cornwall358875f2016-02-10 15:48:01 -06001468 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1469 * Workaround:
1470 * BIOS should enable ATS write permission check by setting
1471 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1472 */
1473static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1474{
1475 u32 value;
1476
1477 if ((boot_cpu_data.x86 != 0x15) ||
1478 (boot_cpu_data.x86_model < 0x30) ||
1479 (boot_cpu_data.x86_model > 0x3f))
1480 return;
1481
1482 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1483 value = iommu_read_l2(iommu, 0x47);
1484
1485 if (value & BIT(0))
1486 return;
1487
1488 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1489 iommu_write_l2(iommu, 0x47, value | BIT(0));
1490
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001491 pci_info(iommu->dev, "Applying ATS write check workaround\n");
Jay Cornwall358875f2016-02-10 15:48:01 -06001492}
1493
1494/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02001495 * This function glues the initialization for one IOMMU together and also
 1496 * allocates the command buffer and programs the hardware. It does NOT
 1497 * enable the IOMMU. This is done afterwards.
1498 */
Joerg Roedele47d4022008-06-26 21:27:48 +02001499static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1500{
Joerg Roedel6efed632012-06-14 15:52:58 +02001501 int ret;
1502
Scott Wood27790392018-01-21 03:28:54 -06001503 raw_spin_lock_init(&iommu->lock);
Joerg Roedelbb527772009-11-20 14:31:51 +01001504
1505 /* Add IOMMU to internal data structures */
Joerg Roedele47d4022008-06-26 21:27:48 +02001506 list_add_tail(&iommu->list, &amd_iommu_list);
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -06001507 iommu->index = amd_iommus_present++;
Joerg Roedelbb527772009-11-20 14:31:51 +01001508
1509 if (unlikely(iommu->index >= MAX_IOMMUS)) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001510 WARN(1, "System has more IOMMUs than supported by this driver\n");
Joerg Roedelbb527772009-11-20 14:31:51 +01001511 return -ENOSYS;
1512 }
1513
1514 /* Index is fine - add IOMMU to the array */
1515 amd_iommus[iommu->index] = iommu;
Joerg Roedele47d4022008-06-26 21:27:48 +02001516
1517 /*
1518 * Copy data from ACPI table entry to the iommu struct
1519 */
Joerg Roedel23c742d2012-06-12 11:47:34 +02001520 iommu->devid = h->devid;
Joerg Roedele47d4022008-06-26 21:27:48 +02001521 iommu->cap_ptr = h->cap_ptr;
Joerg Roedelee893c22008-09-08 14:48:04 +02001522 iommu->pci_seg = h->pci_seg;
Joerg Roedele47d4022008-06-26 21:27:48 +02001523 iommu->mmio_phys = h->mmio_phys;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001524
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001525 switch (h->type) {
1526 case 0x10:
1527 /* Check if IVHD EFR contains proper max banks/counters */
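		/*
		 * These masks pick what appear to be the performance counter
		 * fields of the type 10h attribute word: bits 16:13 (number of
		 * counters) and bits 22:17 (number of counter banks). Only if
		 * both are non-zero is the full MMIO register range mapped.
		 */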
1528 if ((h->efr_attr != 0) &&
1529 ((h->efr_attr & (0xF << 13)) != 0) &&
1530 ((h->efr_attr & (0x3F << 17)) != 0))
1531 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1532 else
1533 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001534 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1535 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05001536 if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
1537 amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001538 break;
1539 case 0x11:
1540 case 0x40:
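		/*
		 * For type 11h/40h IVHDs the EFR image is checked directly;
		 * bit 9 is presumably the performance counter support bit
		 * (matching FEATURE_PC), which decides whether the full MMIO
		 * register range gets mapped.
		 */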
1541 if (h->efr_reg & (1 << 9))
1542 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1543 else
1544 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001545 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1546 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05001547 if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
1548 amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
Suravee Suthikulpanit7d7d38a2016-04-01 09:05:57 -04001549 break;
1550 default:
1551 return -EINVAL;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001552 }
1553
1554 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1555 iommu->mmio_phys_end);
Joerg Roedele47d4022008-06-26 21:27:48 +02001556 if (!iommu->mmio_base)
1557 return -ENOMEM;
1558
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001559 if (alloc_command_buffer(iommu))
Joerg Roedele47d4022008-06-26 21:27:48 +02001560 return -ENOMEM;
1561
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001562 if (alloc_event_buffer(iommu))
Joerg Roedel335503e2008-09-05 14:29:07 +02001563 return -ENOMEM;
1564
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001565 iommu->int_enabled = false;
1566
Baoquan He4c232a72017-08-09 16:33:33 +08001567 init_translation_status(iommu);
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08001568 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1569 iommu_disable(iommu);
1570 clear_translation_pre_enabled(iommu);
1571 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1572 iommu->index);
1573 }
1574 if (amd_iommu_pre_enabled)
1575 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
Baoquan He4c232a72017-08-09 16:33:33 +08001576
Joerg Roedel6efed632012-06-14 15:52:58 +02001577 ret = init_iommu_from_acpi(iommu, h);
1578 if (ret)
1579 return ret;
Joerg Roedelf6fec002012-06-21 16:51:25 +02001580
Jiang Liu7c71d302015-04-13 14:11:33 +08001581 ret = amd_iommu_create_irq_domain(iommu);
1582 if (ret)
1583 return ret;
1584
Joerg Roedelf6fec002012-06-21 16:51:25 +02001585 /*
1586 * Make sure IOMMU is not considered to translate itself. The IVRS
1587 * table tells us so, but this is a lie!
1588 */
1589 amd_iommu_rlookup_table[iommu->devid] = NULL;
1590
Joerg Roedel23c742d2012-06-12 11:47:34 +02001591 return 0;
Joerg Roedele47d4022008-06-26 21:27:48 +02001592}
1593
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001594/**
1595 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 1596 * @ivrs: Pointer to the IVRS header
 1597 *
 1598 * This function searches through all IVHDs of the first-listed IOMMU and returns the highest supported IVHD type found
1599 */
1600static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1601{
1602 u8 *base = (u8 *)ivrs;
1603 struct ivhd_header *ivhd = (struct ivhd_header *)
1604 (base + IVRS_HEADER_LENGTH);
1605 u8 last_type = ivhd->type;
1606 u16 devid = ivhd->devid;
1607
1608 while (((u8 *)ivhd - base < ivrs->length) &&
1609 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1610 u8 *p = (u8 *) ivhd;
1611
1612 if (ivhd->devid == devid)
1613 last_type = ivhd->type;
1614 ivhd = (struct ivhd_header *)(p + ivhd->length);
1615 }
1616
1617 return last_type;
1618}
1619
Joerg Roedelb65233a2008-07-11 17:14:21 +02001620/*
1621 * Iterates over all IOMMU entries in the ACPI table, allocates the
1622 * IOMMU structure and initializes it with init_iommu_one()
1623 */
Joerg Roedele47d4022008-06-26 21:27:48 +02001624static int __init init_iommu_all(struct acpi_table_header *table)
1625{
1626 u8 *p = (u8 *)table, *end = (u8 *)table;
1627 struct ivhd_header *h;
1628 struct amd_iommu *iommu;
1629 int ret;
1630
Joerg Roedele47d4022008-06-26 21:27:48 +02001631 end += table->length;
1632 p += IVRS_HEADER_LENGTH;
1633
1634 while (p < end) {
1635 h = (struct ivhd_header *)p;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04001636 if (*p == amd_iommu_target_ivhd_type) {
Joerg Roedel9c720412009-05-20 13:53:57 +02001637
Joerg Roedelae908c22009-09-01 16:52:16 +02001638 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
Joerg Roedel9c720412009-05-20 13:53:57 +02001639 "seg: %d flags: %01x info %04x\n",
Shuah Khanc5081cd2013-02-27 17:07:19 -07001640 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
Joerg Roedel9c720412009-05-20 13:53:57 +02001641 PCI_FUNC(h->devid), h->cap_ptr,
1642 h->pci_seg, h->flags, h->info);
1643 DUMP_printk(" mmio-addr: %016llx\n",
1644 h->mmio_phys);
1645
Joerg Roedele47d4022008-06-26 21:27:48 +02001646 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02001647 if (iommu == NULL)
1648 return -ENOMEM;
Joerg Roedel3551a702010-03-01 13:52:19 +01001649
Joerg Roedele47d4022008-06-26 21:27:48 +02001650 ret = init_iommu_one(iommu, h);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02001651 if (ret)
1652 return ret;
Joerg Roedele47d4022008-06-26 21:27:48 +02001653 }
1654 p += h->length;
1655
1656 }
1657 WARN_ON(p != end);
1658
1659 return 0;
1660}
1661
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001662static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1663 u8 fxn, u64 *value, bool is_write);
Steven L Kinney30861dd2013-06-05 16:11:48 -05001664
1665static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1666{
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001667 struct pci_dev *pdev = iommu->dev;
Steven L Kinney30861dd2013-06-05 16:11:48 -05001668 u64 val = 0xabcd, val2 = 0;
1669
1670 if (!iommu_feature(iommu, FEATURE_PC))
1671 return;
1672
1673 amd_iommu_pc_present = true;
1674
1675 /* Check if the performance counters can be written to */
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06001676 if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1677 (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
Steven L Kinney30861dd2013-06-05 16:11:48 -05001678 (val != val2)) {
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001679 pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
Steven L Kinney30861dd2013-06-05 16:11:48 -05001680 amd_iommu_pc_present = false;
1681 return;
1682 }
1683
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001684 pci_info(pdev, "IOMMU performance counters supported\n");
Steven L Kinney30861dd2013-06-05 16:11:48 -05001685
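	/*
	 * The counter configuration register reports the number of counter
	 * banks in bits 17:12 and the number of counters in bits 10:7; they
	 * are extracted into max_banks and max_counters below.
	 */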
1686 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1687 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1688 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1689}
1690
Alex Williamson066f2e92014-06-12 16:12:37 -06001691static ssize_t amd_iommu_show_cap(struct device *dev,
1692 struct device_attribute *attr,
1693 char *buf)
1694{
Joerg Roedelb7a42b92017-02-28 13:57:18 +01001695 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
Alex Williamson066f2e92014-06-12 16:12:37 -06001696 return sprintf(buf, "%x\n", iommu->cap);
1697}
1698static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1699
1700static ssize_t amd_iommu_show_features(struct device *dev,
1701 struct device_attribute *attr,
1702 char *buf)
1703{
Joerg Roedelb7a42b92017-02-28 13:57:18 +01001704 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
Alex Williamson066f2e92014-06-12 16:12:37 -06001705 return sprintf(buf, "%llx\n", iommu->features);
1706}
1707static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1708
1709static struct attribute *amd_iommu_attrs[] = {
1710 &dev_attr_cap.attr,
1711 &dev_attr_features.attr,
1712 NULL,
1713};
1714
1715static struct attribute_group amd_iommu_group = {
1716 .name = "amd-iommu",
1717 .attrs = amd_iommu_attrs,
1718};
1719
1720static const struct attribute_group *amd_iommu_groups[] = {
1721 &amd_iommu_group,
1722 NULL,
1723};
Steven L Kinney30861dd2013-06-05 16:11:48 -05001724
Joerg Roedel24d2c522018-10-05 12:32:46 +02001725static int __init iommu_init_pci(struct amd_iommu *iommu)
Joerg Roedel23c742d2012-06-12 11:47:34 +02001726{
1727 int cap_ptr = iommu->cap_ptr;
1728 u32 range, misc, low, high;
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001729 int ret;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001730
Sinan Kayad5bf0f42017-12-19 00:37:47 -05001731 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1732 iommu->devid & 0xff);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001733 if (!iommu->dev)
1734 return -ENODEV;
1735
Jiang Liucbbc00b2015-10-09 22:07:31 +08001736 /* Prevent binding other PCI device drivers to IOMMU devices */
1737 iommu->dev->match_driver = false;
1738
Joerg Roedel23c742d2012-06-12 11:47:34 +02001739 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1740 &iommu->cap);
1741 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
1742 &range);
1743 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
1744 &misc);
1745
Joerg Roedel23c742d2012-06-12 11:47:34 +02001746 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1747 amd_iommu_iotlb_sup = false;
1748
1749 /* read extended feature bits */
1750 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
1751 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
1752
1753 iommu->features = ((u64)high << 32) | low;
1754
1755 if (iommu_feature(iommu, FEATURE_GT)) {
1756 int glxval;
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001757 u32 max_pasid;
1758 u64 pasmax;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001759
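		/*
		 * The PASmax field of the extended features register holds the
		 * number of supported PASID bits minus one, so the largest
		 * usable PASID is (1 << (pasmax + 1)) - 1.
		 */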
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001760 pasmax = iommu->features & FEATURE_PASID_MASK;
1761 pasmax >>= FEATURE_PASID_SHIFT;
1762 max_pasid = (1 << (pasmax + 1)) - 1;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001763
Suravee Suthikulpanita919a012014-03-05 18:54:18 -06001764 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1765
1766 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
Joerg Roedel23c742d2012-06-12 11:47:34 +02001767
1768 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1769 glxval >>= FEATURE_GLXVAL_SHIFT;
1770
1771 if (amd_iommu_max_glx_val == -1)
1772 amd_iommu_max_glx_val = glxval;
1773 else
1774 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1775 }
1776
1777 if (iommu_feature(iommu, FEATURE_GT) &&
1778 iommu_feature(iommu, FEATURE_PPR)) {
1779 iommu->is_iommu_v2 = true;
1780 amd_iommu_v2_present = true;
1781 }
1782
Joerg Roedelf2c2db52015-10-20 17:33:42 +02001783 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1784 return -ENOMEM;
Joerg Roedel23c742d2012-06-12 11:47:34 +02001785
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001786 ret = iommu_init_ga(iommu);
1787 if (ret)
1788 return ret;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001789
Joerg Roedel23c742d2012-06-12 11:47:34 +02001790 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1791 amd_iommu_np_cache = true;
1792
Steven L Kinney30861dd2013-06-05 16:11:48 -05001793 init_iommu_perf_ctr(iommu);
1794
Joerg Roedel23c742d2012-06-12 11:47:34 +02001795 if (is_rd890_iommu(iommu->dev)) {
1796 int i, j;
1797
Sinan Kayad5bf0f42017-12-19 00:37:47 -05001798 iommu->root_pdev =
1799 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1800 PCI_DEVFN(0, 0));
Joerg Roedel23c742d2012-06-12 11:47:34 +02001801
1802 /*
1803 * Some rd890 systems may not be fully reconfigured by the
1804 * BIOS, so it's necessary for us to store this information so
1805 * it can be reprogrammed on resume
1806 */
1807 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1808 &iommu->stored_addr_lo);
1809 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1810 &iommu->stored_addr_hi);
1811
1812 /* Low bit locks writes to configuration space */
1813 iommu->stored_addr_lo &= ~1;
1814
1815 for (i = 0; i < 6; i++)
1816 for (j = 0; j < 0x12; j++)
1817 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1818
1819 for (i = 0; i < 0x83; i++)
1820 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1821 }
1822
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001823 amd_iommu_erratum_746_workaround(iommu);
Jay Cornwall358875f2016-02-10 15:48:01 -06001824 amd_iommu_ats_write_check_workaround(iommu);
Suravee Suthikulpanit318fe782013-01-24 13:17:53 -06001825
Joerg Roedel39ab9552017-02-01 16:56:46 +01001826 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1827 amd_iommu_groups, "ivhd%d", iommu->index);
Joerg Roedelb0119e82017-02-01 13:23:08 +01001828 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1829 iommu_device_register(&iommu->iommu);
Alex Williamson066f2e92014-06-12 16:12:37 -06001830
Joerg Roedel23c742d2012-06-12 11:47:34 +02001831 return pci_enable_device(iommu->dev);
1832}
1833
Joerg Roedel4d121c32012-06-14 12:21:55 +02001834static void print_iommu_info(void)
1835{
1836 static const char * const feat_str[] = {
1837 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1838 "IA", "GA", "HE", "PC"
1839 };
1840 struct amd_iommu *iommu;
1841
1842 for_each_iommu(iommu) {
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001843 struct pci_dev *pdev = iommu->dev;
Joerg Roedel4d121c32012-06-14 12:21:55 +02001844 int i;
1845
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001846 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
Joerg Roedel4d121c32012-06-14 12:21:55 +02001847
1848 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06001849 pci_info(pdev, "Extended features (%#llx):\n",
1850 iommu->features);
Joerg Roedel2bd5ed02012-08-10 11:34:08 +02001851 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
Joerg Roedel4d121c32012-06-14 12:21:55 +02001852 if (iommu_feature(iommu, (1ULL << i)))
1853 pr_cont(" %s", feat_str[i]);
1854 }
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001855
1856 if (iommu->features & FEATURE_GAM_VAPIC)
1857 pr_cont(" GA_vAPIC");
1858
Steven L Kinney30861dd2013-06-05 16:11:48 -05001859 pr_cont("\n");
Borislav Petkov500c25e2012-09-28 16:22:26 +02001860 }
Joerg Roedel4d121c32012-06-14 12:21:55 +02001861 }
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001862 if (irq_remapping_enabled) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001863 pr_info("Interrupt remapping enabled\n");
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001864 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
Joerg Roedel101fa032018-11-27 16:22:31 +01001865 pr_info("Virtual APIC enabled\n");
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05001866 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
Joerg Roedel101fa032018-11-27 16:22:31 +01001867 pr_info("X2APIC enabled\n");
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05001868 }
Joerg Roedel4d121c32012-06-14 12:21:55 +02001869}
1870
Joerg Roedel2c0ae172012-06-12 15:59:30 +02001871static int __init amd_iommu_init_pci(void)
Joerg Roedel23c742d2012-06-12 11:47:34 +02001872{
1873 struct amd_iommu *iommu;
1874 int ret = 0;
1875
1876 for_each_iommu(iommu) {
1877 ret = iommu_init_pci(iommu);
1878 if (ret)
1879 break;
1880 }
1881
Joerg Roedel522e5cb72016-07-01 16:42:55 +02001882 /*
1883 * Order is important here to make sure any unity map requirements are
1884 * fulfilled. The unity mappings are created and written to the device
1885 * table during the amd_iommu_init_api() call.
1886 *
1887 * After that we call init_device_table_dma() to make sure any
1888 * uninitialized DTE will block DMA, and in the end we flush the caches
1889 * of all IOMMUs to make sure the changes to the device table are
1890 * active.
1891 */
1892 ret = amd_iommu_init_api();
1893
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001894 init_device_table_dma();
Joerg Roedel23c742d2012-06-12 11:47:34 +02001895
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02001896 for_each_iommu(iommu)
1897 iommu_flush_all_caches(iommu);
1898
Joerg Roedel3a18404c2015-05-28 18:41:45 +02001899 if (!ret)
1900 print_iommu_info();
Joerg Roedel4d121c32012-06-14 12:21:55 +02001901
Joerg Roedel23c742d2012-06-12 11:47:34 +02001902 return ret;
1903}
1904
Joerg Roedelb65233a2008-07-11 17:14:21 +02001905/****************************************************************************
1906 *
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001907 * The following functions initialize the MSI interrupts for all IOMMUs
Frank Arnolddf805ab2012-08-27 19:21:04 +02001908 * in the system. It's a bit challenging because there could be multiple
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001909 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1910 * pci_dev.
1911 *
1912 ****************************************************************************/
1913
Joerg Roedel9f800de2009-11-23 12:45:25 +01001914static int iommu_setup_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001915{
1916 int r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001917
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001918 r = pci_enable_msi(iommu->dev);
1919 if (r)
1920 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001921
Joerg Roedel72fe00f2011-05-10 10:50:42 +02001922 r = request_threaded_irq(iommu->dev->irq,
1923 amd_iommu_int_handler,
1924 amd_iommu_int_thread,
1925 0, "AMD-Vi",
Suravee Suthikulpanit3f398bc2013-04-22 16:32:34 -05001926 iommu);
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001927
1928 if (r) {
1929 pci_disable_msi(iommu->dev);
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001930 return r;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001931 }
1932
Joerg Roedelfab6afa2009-05-04 18:46:34 +02001933 iommu->int_enabled = true;
Joerg Roedel1a29ac02011-11-10 15:41:40 +01001934
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001935 return 0;
1936}
1937
Joerg Roedel05f92db2009-05-12 09:52:46 +02001938static int iommu_init_msi(struct amd_iommu *iommu)
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001939{
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001940 int ret;
1941
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001942 if (iommu->int_enabled)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001943 goto enable_faults;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001944
Yijing Wang82fcfc62013-08-08 21:12:36 +08001945 if (iommu->dev->msi_cap)
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001946 ret = iommu_setup_msi(iommu);
1947 else
1948 ret = -ENODEV;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001949
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001950 if (ret)
1951 return ret;
1952
1953enable_faults:
1954 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
1955
1956 if (iommu->ppr_log != NULL)
1957 iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
1958
Suravee Suthikulpanit8bda0cf2016-08-23 13:52:36 -05001959 iommu_ga_log_enable(iommu);
1960
Joerg Roedel9ddd5922012-03-15 16:29:47 +01001961 return 0;
Joerg Roedela80dc3e2008-09-11 16:51:41 +02001962}
1963
1964/****************************************************************************
1965 *
Joerg Roedelb65233a2008-07-11 17:14:21 +02001966 * The next functions belong to the third pass of parsing the ACPI
1967 * table. In this last pass the memory mapping requirements are
Frank Arnolddf805ab2012-08-27 19:21:04 +02001968 * gathered (like exclusion and unity mapping ranges).
Joerg Roedelb65233a2008-07-11 17:14:21 +02001969 *
1970 ****************************************************************************/
1971
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001972static void __init free_unity_maps(void)
1973{
1974 struct unity_map_entry *entry, *next;
1975
1976 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
1977 list_del(&entry->list);
1978 kfree(entry);
1979 }
1980}
1981
Joerg Roedelb65233a2008-07-11 17:14:21 +02001982/* called when we find an exclusion range definition in ACPI */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001983static int __init init_exclusion_range(struct ivmd_header *m)
1984{
1985 int i;
1986
1987 switch (m->type) {
1988 case ACPI_IVMD_TYPE:
1989 set_device_exclusion_range(m->devid, m);
1990 break;
1991 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel3a61ec32008-07-25 13:07:50 +02001992 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelbe2a0222008-06-26 21:27:49 +02001993 set_device_exclusion_range(i, m);
1994 break;
1995 case ACPI_IVMD_TYPE_RANGE:
1996 for (i = m->devid; i <= m->aux; ++i)
1997 set_device_exclusion_range(i, m);
1998 break;
1999 default:
2000 break;
2001 }
2002
2003 return 0;
2004}
2005
Joerg Roedelb65233a2008-07-11 17:14:21 +02002006/* called for unity map ACPI definition */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002007static int __init init_unity_map_range(struct ivmd_header *m)
2008{
Joerg Roedel98f1ad22012-07-06 13:28:37 +02002009 struct unity_map_entry *e = NULL;
Joerg Roedel02acc432009-05-20 16:24:21 +02002010 char *s;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002011
2012 e = kzalloc(sizeof(*e), GFP_KERNEL);
2013 if (e == NULL)
2014 return -ENOMEM;
2015
2016 switch (m->type) {
2017 default:
Joerg Roedel0bc252f2009-05-22 12:48:05 +02002018 kfree(e);
2019 return 0;
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002020 case ACPI_IVMD_TYPE:
Joerg Roedel02acc432009-05-20 16:24:21 +02002021 s = "IVMD_TYPE\t\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002022 e->devid_start = e->devid_end = m->devid;
2023 break;
2024 case ACPI_IVMD_TYPE_ALL:
Joerg Roedel02acc432009-05-20 16:24:21 +02002025 s = "IVMD_TYPE_ALL\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002026 e->devid_start = 0;
2027 e->devid_end = amd_iommu_last_bdf;
2028 break;
2029 case ACPI_IVMD_TYPE_RANGE:
Joerg Roedel02acc432009-05-20 16:24:21 +02002030 s = "IVMD_TYPE_RANGE\t\t";
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002031 e->devid_start = m->devid;
2032 e->devid_end = m->aux;
2033 break;
2034 }
2035 e->address_start = PAGE_ALIGN(m->range_start);
2036 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
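	/*
	 * Bit 0 of the IVMD flags is the unity-map indicator; shifting it out
	 * leaves the read/write permission bits, which become the protection
	 * value for this range.
	 */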
2037 e->prot = m->flags >> 1;
2038
Joerg Roedel02acc432009-05-20 16:24:21 +02002039 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2040 " range_start: %016llx range_end: %016llx flags: %x\n", s,
Shuah Khanc5081cd2013-02-27 17:07:19 -07002041 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2042 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
Joerg Roedel02acc432009-05-20 16:24:21 +02002043 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2044 e->address_start, e->address_end, m->flags);
2045
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002046 list_add_tail(&e->list, &amd_iommu_unity_map);
2047
2048 return 0;
2049}
2050
Joerg Roedelb65233a2008-07-11 17:14:21 +02002051/* iterates over all memory definitions we find in the ACPI table */
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002052static int __init init_memory_definitions(struct acpi_table_header *table)
2053{
2054 u8 *p = (u8 *)table, *end = (u8 *)table;
2055 struct ivmd_header *m;
2056
Joerg Roedelbe2a0222008-06-26 21:27:49 +02002057 end += table->length;
2058 p += IVRS_HEADER_LENGTH;
2059
2060 while (p < end) {
2061 m = (struct ivmd_header *)p;
2062 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2063 init_exclusion_range(m);
2064 else if (m->flags & IVMD_FLAG_UNITY_MAP)
2065 init_unity_map_range(m);
2066
2067 p += m->length;
2068 }
2069
2070 return 0;
2071}
2072
Joerg Roedelb65233a2008-07-11 17:14:21 +02002073/*
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002074 * Init the device table so that DMA access is blocked for all devices
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002075 */
Joerg Roedel33f28c52012-06-15 18:03:31 +02002076static void init_device_table_dma(void)
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002077{
Joerg Roedel0de66d52011-06-06 16:04:02 +02002078 u32 devid;
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002079
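	/*
	 * Mark every DTE valid with translation enabled. Since no page table
	 * root is set, this blocks DMA from any device that has not been
	 * explicitly attached to a domain.
	 */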
2080 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2081 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2082 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002083 }
2084}
2085
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002086static void __init uninit_device_table_dma(void)
2087{
2088 u32 devid;
2089
2090 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2091 amd_iommu_dev_table[devid].data[0] = 0ULL;
2092 amd_iommu_dev_table[devid].data[1] = 0ULL;
2093 }
2094}
2095
Joerg Roedel33f28c52012-06-15 18:03:31 +02002096static void init_device_table(void)
2097{
2098 u32 devid;
2099
2100 if (!amd_iommu_irq_remap)
2101 return;
2102
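	/*
	 * With interrupt remapping active, flag every DTE so that interrupts
	 * from the device must go through a remapping table.
	 */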
2103 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2104 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2105}
2106
Joerg Roedele9bf5192010-09-20 14:33:07 +02002107static void iommu_init_flags(struct amd_iommu *iommu)
2108{
2109 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2110 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2111 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2112
2113 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2114 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2115 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2116
2117 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2118 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2119 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2120
2121 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2122 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2123 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2124
2125 /*
2126 * make IOMMU memory accesses cache coherent
2127 */
2128 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
Joerg Roedel1456e9d2011-12-22 14:51:53 +01002129
2130 /* Set IOTLB invalidation timeout to 1s */
2131 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
Joerg Roedele9bf5192010-09-20 14:33:07 +02002132}
2133
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002134static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
Joerg Roedel4c894f42010-09-23 15:15:19 +02002135{
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002136 int i, j;
2137 u32 ioc_feature_control;
Joerg Roedelc1bf94e2012-05-31 17:38:11 +02002138 struct pci_dev *pdev = iommu->root_pdev;
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002139
2140 /* RD890 BIOSes may not have completely reconfigured the iommu */
Joerg Roedelc1bf94e2012-05-31 17:38:11 +02002141 if (!is_rd890_iommu(iommu->dev) || !pdev)
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002142 return;
2143
2144 /*
2145 * First, we need to ensure that the iommu is enabled. This is
2146 * controlled by a register in the northbridge
2147 */
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002148
2149 /* Select Northbridge indirect register 0x75 and enable writing */
2150 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2151 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2152
2153 /* Enable the iommu */
2154 if (!(ioc_feature_control & 0x1))
2155 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2156
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002157 /* Restore the iommu BAR */
2158 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2159 iommu->stored_addr_lo);
2160 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2161 iommu->stored_addr_hi);
2162
2163 /* Restore the l1 indirect regs for each of the 6 l1s */
2164 for (i = 0; i < 6; i++)
2165 for (j = 0; j < 0x12; j++)
2166 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2167
2168 /* Restore the l2 indirect regs */
2169 for (i = 0; i < 0x83; i++)
2170 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2171
2172 /* Lock PCI setup registers */
2173 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2174 iommu->stored_addr_lo | 1);
Joerg Roedel4c894f42010-09-23 15:15:19 +02002175}
2176
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002177static void iommu_enable_ga(struct amd_iommu *iommu)
2178{
2179#ifdef CONFIG_IRQ_REMAP
2180 switch (amd_iommu_guest_ir) {
2181 case AMD_IOMMU_GUEST_IR_VAPIC:
2182 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2183 /* Fall through */
2184 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2185 iommu_feature_enable(iommu, CONTROL_GA_EN);
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05002186 iommu->irte_ops = &irte_128_ops;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002187 break;
2188 default:
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05002189 iommu->irte_ops = &irte_32_ops;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002190 break;
2191 }
2192#endif
2193}
2194
Baoquan He78d313c2017-08-09 16:33:34 +08002195static void early_enable_iommu(struct amd_iommu *iommu)
2196{
2197 iommu_disable(iommu);
2198 iommu_init_flags(iommu);
2199 iommu_set_device_table(iommu);
2200 iommu_enable_command_buffer(iommu);
2201 iommu_enable_event_buffer(iommu);
2202 iommu_set_exclusion_range(iommu);
2203 iommu_enable_ga(iommu);
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002204 iommu_enable_xt(iommu);
Baoquan He78d313c2017-08-09 16:33:34 +08002205 iommu_enable(iommu);
2206 iommu_flush_all_caches(iommu);
2207}
2208
Joerg Roedel9f5f5fb2008-08-14 19:55:16 +02002209/*
Joerg Roedelb65233a2008-07-11 17:14:21 +02002210 * This function finally enables all IOMMUs found in the system after
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002211 * they have been initialized.
2212 *
 2213 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to copy
 2214 * the old content of the device table entries. If that is not the case, or
 2215 * if the copy fails, just continue as a normal kernel would.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002216 */
Joerg Roedel11ee5ac2012-06-12 16:30:06 +02002217static void early_enable_iommus(void)
Joerg Roedel87361972008-06-26 21:28:07 +02002218{
2219 struct amd_iommu *iommu;
2220
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002221
2222 if (!copy_device_table()) {
2223 /*
2224 * If come here because of failure in copying device table from old
2225 * kernel with all IOMMUs enabled, print error message and try to
2226 * free allocated old_dev_tbl_cpy.
2227 */
2228 if (amd_iommu_pre_enabled)
2229 pr_err("Failed to copy DEV table from previous kernel.\n");
2230 if (old_dev_tbl_cpy != NULL)
2231 free_pages((unsigned long)old_dev_tbl_cpy,
2232 get_order(dev_table_size));
2233
2234 for_each_iommu(iommu) {
2235 clear_translation_pre_enabled(iommu);
2236 early_enable_iommu(iommu);
2237 }
2238 } else {
2239 pr_info("Copied DEV table from previous kernel.\n");
2240 free_pages((unsigned long)amd_iommu_dev_table,
2241 get_order(dev_table_size));
2242 amd_iommu_dev_table = old_dev_tbl_cpy;
2243 for_each_iommu(iommu) {
2244 iommu_disable_command_buffer(iommu);
2245 iommu_disable_event_buffer(iommu);
2246 iommu_enable_command_buffer(iommu);
2247 iommu_enable_event_buffer(iommu);
2248 iommu_enable_ga(iommu);
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002249 iommu_enable_xt(iommu);
Baoquan He3ac3e5ee2017-08-09 16:33:38 +08002250 iommu_set_device_table(iommu);
2251 iommu_flush_all_caches(iommu);
2252 }
Joerg Roedel87361972008-06-26 21:28:07 +02002253 }
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002254
2255#ifdef CONFIG_IRQ_REMAP
2256 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2257 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2258#endif
Joerg Roedel87361972008-06-26 21:28:07 +02002259}
2260
Joerg Roedel11ee5ac2012-06-12 16:30:06 +02002261static void enable_iommus_v2(void)
2262{
2263 struct amd_iommu *iommu;
2264
2265 for_each_iommu(iommu) {
2266 iommu_enable_ppr_log(iommu);
2267 iommu_enable_gt(iommu);
2268 }
2269}
2270
2271static void enable_iommus(void)
2272{
2273 early_enable_iommus();
2274
2275 enable_iommus_v2();
2276}
2277
Joerg Roedel92ac4322009-05-19 19:06:27 +02002278static void disable_iommus(void)
2279{
2280 struct amd_iommu *iommu;
2281
2282 for_each_iommu(iommu)
2283 iommu_disable(iommu);
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002284
2285#ifdef CONFIG_IRQ_REMAP
2286 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2287 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2288#endif
Joerg Roedel92ac4322009-05-19 19:06:27 +02002289}
2290
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002291/*
2292 * Suspend/Resume support
2293 * disable suspend until real resume implemented
2294 */
2295
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002296static void amd_iommu_resume(void)
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002297{
Matthew Garrett5bcd7572010-10-04 14:59:31 -04002298 struct amd_iommu *iommu;
2299
2300 for_each_iommu(iommu)
2301 iommu_apply_resume_quirks(iommu);
2302
Joerg Roedel736501e2009-05-12 09:56:12 +02002303 /* re-load the hardware */
2304 enable_iommus();
Joerg Roedel3d9761e2012-03-15 16:39:21 +01002305
2306 amd_iommu_enable_interrupts();
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002307}
2308
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002309static int amd_iommu_suspend(void)
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002310{
Joerg Roedel736501e2009-05-12 09:56:12 +02002311 /* disable IOMMUs to go out of the way for BIOS */
2312 disable_iommus();
2313
2314 return 0;
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002315}
2316
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002317static struct syscore_ops amd_iommu_syscore_ops = {
Joerg Roedel7441e9c2008-06-30 20:18:02 +02002318 .suspend = amd_iommu_suspend,
2319 .resume = amd_iommu_resume,
2320};
2321
Joerg Roedel90b3eb02017-06-16 16:09:55 +02002322static void __init free_iommu_resources(void)
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002323{
Lucas Stachebcfa282016-10-26 13:09:53 +02002324 kmemleak_free(irq_lookup_table);
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002325 free_pages((unsigned long)irq_lookup_table,
2326 get_order(rlookup_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002327 irq_lookup_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002328
Julia Lawalla5919892015-09-13 14:15:31 +02002329 kmem_cache_destroy(amd_iommu_irq_cache);
2330 amd_iommu_irq_cache = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002331
2332 free_pages((unsigned long)amd_iommu_rlookup_table,
2333 get_order(rlookup_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002334 amd_iommu_rlookup_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002335
2336 free_pages((unsigned long)amd_iommu_alias_table,
2337 get_order(alias_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002338 amd_iommu_alias_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002339
2340 free_pages((unsigned long)amd_iommu_dev_table,
2341 get_order(dev_table_size));
Joerg Roedelf6019272017-06-16 16:09:58 +02002342 amd_iommu_dev_table = NULL;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002343
2344 free_iommu_all();
2345
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002346#ifdef CONFIG_GART_IOMMU
2347 /*
2348 * We failed to initialize the AMD IOMMU - try fallback to GART
2349 * if possible.
2350 */
2351 gart_iommu_init();
2352
2353#endif
2354}
2355
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002356/* SB IOAPIC is always on this device in AMD systems */
2357#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2358
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002359static bool __init check_ioapic_information(void)
2360{
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002361 const char *fw_bug = FW_BUG;
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002362 bool ret, has_sb_ioapic;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002363 int idx;
2364
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002365 has_sb_ioapic = false;
2366 ret = false;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002367
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002368 /*
2369 * If we have map overrides on the kernel command line the
2370 * messages in this function might not describe firmware bugs
2371 * anymore - so be careful
2372 */
2373 if (cmdline_maps)
2374 fw_bug = "";
2375
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002376 for (idx = 0; idx < nr_ioapics; idx++) {
2377 int devid, id = mpc_ioapic_id(idx);
2378
2379 devid = get_ioapic_devid(id);
2380 if (devid < 0) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002381 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002382 fw_bug, id);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002383 ret = false;
2384 } else if (devid == IOAPIC_SB_DEVID) {
2385 has_sb_ioapic = true;
2386 ret = true;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002387 }
2388 }
2389
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002390 if (!has_sb_ioapic) {
2391 /*
2392 * We expect the SB IOAPIC to be listed in the IVRS
2393 * table. The system timer is connected to the SB IOAPIC
2394 * and if we don't have it in the list the system will
2395 * panic at boot time. This situation usually happens
2396 * when the BIOS is buggy and provides us the wrong
2397 * device id for the IOAPIC in the system.
2398 */
Joerg Roedel101fa032018-11-27 16:22:31 +01002399 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002400 }
2401
2402 if (!ret)
Joerg Roedel101fa032018-11-27 16:22:31 +01002403 pr_err("Disabling interrupt remapping\n");
Joerg Roedelc2ff5cf52012-10-16 14:52:51 +02002404
2405 return ret;
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002406}
2407
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002408static void __init free_dma_resources(void)
2409{
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002410 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2411 get_order(MAX_DOMAIN_ID/8));
Joerg Roedelf6019272017-06-16 16:09:58 +02002412 amd_iommu_pd_alloc_bitmap = NULL;
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002413
2414 free_unity_maps();
2415}
2416
Joerg Roedelb65233a2008-07-11 17:14:21 +02002417/*
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002418 * This is the hardware init function for AMD IOMMU in the system.
2419 * This function is called either from amd_iommu_init or from the interrupt
2420 * remapping setup code.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002421 *
2422 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002423 * four times:
Joerg Roedelb65233a2008-07-11 17:14:21 +02002424 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002425 * 1 pass) Discover the most comprehensive IVHD type to use.
2426 *
2427 * 2 pass) Find the highest PCI device id the driver has to handle.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002428 * Upon this information the size of the data structures is
2429 * determined that needs to be allocated.
2430 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002431 * 3 pass) Initialize the data structures just allocated with the
Joerg Roedelb65233a2008-07-11 17:14:21 +02002432 * information in the ACPI table about available AMD IOMMUs
2433 * in the system. It also maps the PCI devices in the
2434 * system to specific IOMMUs
2435 *
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002436 * 4 pass) After the basic data structures are allocated and
Joerg Roedelb65233a2008-07-11 17:14:21 +02002437 * initialized we update them with information about memory
2438 * remapping requirements parsed out of the ACPI table in
2439 * this last pass.
2440 *
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002441 * After everything is set up the IOMMUs are enabled and the necessary
2442 * hotplug and suspend notifiers are registered.
Joerg Roedelb65233a2008-07-11 17:14:21 +02002443 */
Joerg Roedel643511b2012-06-12 12:09:35 +02002444static int __init early_amd_iommu_init(void)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002445{
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002446 struct acpi_table_header *ivrs_base;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002447 acpi_status status;
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002448 int i, remap_cache_sz, ret = 0;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002449
Joerg Roedel643511b2012-06-12 12:09:35 +02002450 if (!amd_iommu_detected)
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002451 return -ENODEV;
2452
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002453 status = acpi_get_table("IVRS", 0, &ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002454 if (status == AE_NOT_FOUND)
2455 return -ENODEV;
2456 else if (ACPI_FAILURE(status)) {
2457 const char *err = acpi_format_exception(status);
Joerg Roedel101fa032018-11-27 16:22:31 +01002458 pr_err("IVRS table error: %s\n", err);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002459 return -EINVAL;
2460 }
2461
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002462 /*
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002463 * Validate checksum here so we don't need to do it when
2464 * we actually parse the table
2465 */
2466 ret = check_ivrs_checksum(ivrs_base);
2467 if (ret)
Rafael J. Wysocki99e8ccd2017-01-10 14:57:28 +01002468 goto out;
Suravee Suthikulpanit8c7142f2016-04-01 09:05:59 -04002469
2470 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2471 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2472
2473 /*
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002474 * First parse ACPI tables to find the largest Bus/Dev/Func
2475 * we need to handle. Upon this information the shared data
2476 * structures for the IOMMUs in the system will be allocated
2477 */
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002478 ret = find_last_devid_acpi(ivrs_base);
2479 if (ret)
Joerg Roedel3551a702010-03-01 13:52:19 +01002480 goto out;
2481
Joerg Roedelc5714842008-07-11 17:14:25 +02002482 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2483 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2484 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002485
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002486 /* Device table - directly used by all IOMMUs */
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002487 ret = -ENOMEM;
Baoquan Heb3367812017-08-09 16:33:42 +08002488 amd_iommu_dev_table = (void *)__get_free_pages(
2489 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002490 get_order(dev_table_size));
2491 if (amd_iommu_dev_table == NULL)
2492 goto out;
2493
2494 /*
2495 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2496 * IOMMU see for that device
2497 */
2498 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2499 get_order(alias_table_size));
2500 if (amd_iommu_alias_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002501 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002502
2503 /* IOMMU rlookup table - find the IOMMU for a specific device */
Joerg Roedel83fd5cc2008-12-16 19:17:11 +01002504 amd_iommu_rlookup_table = (void *)__get_free_pages(
2505 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002506 get_order(rlookup_table_size));
2507 if (amd_iommu_rlookup_table == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002508 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002509
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002510 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2511 GFP_KERNEL | __GFP_ZERO,
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002512 get_order(MAX_DOMAIN_ID/8));
2513 if (amd_iommu_pd_alloc_bitmap == NULL)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002514 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002515
2516 /*
Joerg Roedel5dc8bff2008-07-11 17:14:32 +02002517 * let all alias entries point to themselves
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002518 */
Joerg Roedel3a61ec32008-07-25 13:07:50 +02002519 for (i = 0; i <= amd_iommu_last_bdf; ++i)
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002520 amd_iommu_alias_table[i] = i;
2521
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002522 /*
 2523 * never allocate domain 0 because it's used as the placeholder for
 2524 * non-allocated and error values
2525 */
Baoquan He5c87f622016-09-15 16:50:51 +08002526 __set_bit(0, amd_iommu_pd_alloc_bitmap);
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002527
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002528 spin_lock_init(&amd_iommu_pd_lock);
2529
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002530 /*
2531 * now the data structures are allocated and basically initialized
2532 * start the real acpi table scan
2533 */
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002534 ret = init_iommu_all(ivrs_base);
2535 if (ret)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002536 goto out;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002537
Joerg Roedel11123742017-06-16 16:09:54 +02002538 /* Disable any previously enabled IOMMUs */
Baoquan He20b46df2017-08-09 16:33:44 +08002539 if (!is_kdump_kernel() || amd_iommu_disabled)
2540 disable_iommus();
Joerg Roedel11123742017-06-16 16:09:54 +02002541
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002542 if (amd_iommu_irq_remap)
2543 amd_iommu_irq_remap = check_ioapic_information();
2544
Joerg Roedel05152a02012-06-15 16:53:51 +02002545 if (amd_iommu_irq_remap) {
2546 /*
2547 * Interrupt remapping enabled, create kmem_cache for the
2548 * remapping tables.
2549 */
Wei Yongjun83ed9c12013-04-23 10:47:44 +08002550 ret = -ENOMEM;
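		/*
		 * Legacy interrupt remapping uses 32-bit IRTEs; in guest
		 * virtual APIC (GA) mode each IRTE is 128 bits, so the remap
		 * table cache entries are sized accordingly.
		 */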
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002551 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2552 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2553 else
2554 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
Joerg Roedel05152a02012-06-15 16:53:51 +02002555 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002556 remap_cache_sz,
2557 IRQ_TABLE_ALIGNMENT,
2558 0, NULL);
Joerg Roedel05152a02012-06-15 16:53:51 +02002559 if (!amd_iommu_irq_cache)
2560 goto out;
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002561
2562 irq_lookup_table = (void *)__get_free_pages(
2563 GFP_KERNEL | __GFP_ZERO,
2564 get_order(rlookup_table_size));
Lucas Stachebcfa282016-10-26 13:09:53 +02002565 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2566 1, GFP_KERNEL);
Joerg Roedel0ea2c422012-06-15 18:05:20 +02002567 if (!irq_lookup_table)
2568 goto out;
Joerg Roedel05152a02012-06-15 16:53:51 +02002569 }
2570
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002571 ret = init_memory_definitions(ivrs_base);
2572 if (ret)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002573 goto out;
Joerg Roedel3551a702010-03-01 13:52:19 +01002574
Joerg Roedeleb1eb7a2012-07-05 11:58:02 +02002575 /* init the device table */
2576 init_device_table();
2577
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002578out:
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002579 /* Don't leak any ACPI memory */
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002580 acpi_put_table(ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002581 ivrs_base = NULL;
2582
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002583 return ret;
Joerg Roedel643511b2012-06-12 12:09:35 +02002584}
2585
Gerard Snitselaarae295142012-03-16 11:38:22 -07002586static int amd_iommu_enable_interrupts(void)
Joerg Roedel3d9761e2012-03-15 16:39:21 +01002587{
2588 struct amd_iommu *iommu;
2589 int ret = 0;
2590
2591 for_each_iommu(iommu) {
2592 ret = iommu_init_msi(iommu);
2593 if (ret)
2594 goto out;
2595 }
2596
2597out:
2598 return ret;
2599}
2600
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002601static bool detect_ivrs(void)
2602{
2603 struct acpi_table_header *ivrs_base;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002604 acpi_status status;
2605
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002606 status = acpi_get_table("IVRS", 0, &ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002607 if (status == AE_NOT_FOUND)
2608 return false;
2609 else if (ACPI_FAILURE(status)) {
2610 const char *err = acpi_format_exception(status);
Joerg Roedel101fa032018-11-27 16:22:31 +01002611 pr_err("IVRS table error: %s\n", err);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002612 return false;
2613 }
2614
Lv Zheng6b11d1d2016-12-14 15:04:39 +08002615 acpi_put_table(ivrs_base);
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002616
Joerg Roedel1adb7d32012-08-06 14:18:42 +02002617 /* Make sure ACS will be enabled during PCI probe */
2618 pci_request_acs();
2619
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002620 return true;
2621}
2622
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002623/****************************************************************************
2624 *
2625 * AMD IOMMU Initialization State Machine
2626 *
2627 ****************************************************************************/
2628
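/*
 * Expected forward progression of init_state:
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states.
 */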
2629static int __init state_next(void)
2630{
2631 int ret = 0;
2632
2633 switch (init_state) {
2634 case IOMMU_START_STATE:
2635 if (!detect_ivrs()) {
2636 init_state = IOMMU_NOT_FOUND;
2637 ret = -ENODEV;
2638 } else {
2639 init_state = IOMMU_IVRS_DETECTED;
2640 }
2641 break;
2642 case IOMMU_IVRS_DETECTED:
2643 ret = early_amd_iommu_init();
2644 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
Joerg Roedel7ad820e2017-06-16 16:09:59 +02002645 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002646 pr_info("AMD IOMMU disabled on kernel command-line\n");
Joerg Roedel7ad820e2017-06-16 16:09:59 +02002647 free_dma_resources();
2648 free_iommu_resources();
2649 init_state = IOMMU_CMDLINE_DISABLED;
2650 ret = -EINVAL;
2651 }
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002652 break;
2653 case IOMMU_ACPI_FINISHED:
2654 early_enable_iommus();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002655 x86_platform.iommu_shutdown = disable_iommus;
2656 init_state = IOMMU_ENABLED;
2657 break;
2658 case IOMMU_ENABLED:
Joerg Roedel74ddda72017-07-26 14:17:55 +02002659 register_syscore_ops(&amd_iommu_syscore_ops);
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002660 ret = amd_iommu_init_pci();
2661 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2662 enable_iommus_v2();
2663 break;
2664 case IOMMU_PCI_INIT:
2665 ret = amd_iommu_enable_interrupts();
2666 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2667 break;
2668 case IOMMU_INTERRUPTS_EN:
Joerg Roedel1e6a7b02015-07-28 16:58:48 +02002669 ret = amd_iommu_init_dma_ops();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002670 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2671 break;
2672 case IOMMU_DMA_OPS:
2673 init_state = IOMMU_INITIALIZED;
2674 break;
2675 case IOMMU_INITIALIZED:
2676 /* Nothing to do */
2677 break;
2678 case IOMMU_NOT_FOUND:
2679 case IOMMU_INIT_ERROR:
Joerg Roedel1b1e9422017-06-16 16:09:56 +02002680 case IOMMU_CMDLINE_DISABLED:
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002681 /* Error states => do nothing */
2682 ret = -EINVAL;
2683 break;
2684 default:
2685 /* Unknown state */
2686 BUG();
2687 }
2688
2689 return ret;
2690}
2691
2692static int __init iommu_go_to_state(enum iommu_init_state state)
2693{
Joerg Roedel151b0902017-06-16 16:09:57 +02002694 int ret = -EINVAL;
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002695
2696 while (init_state != state) {
Joerg Roedel1b1e9422017-06-16 16:09:56 +02002697 if (init_state == IOMMU_NOT_FOUND ||
2698 init_state == IOMMU_INIT_ERROR ||
2699 init_state == IOMMU_CMDLINE_DISABLED)
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002700 break;
Joerg Roedel151b0902017-06-16 16:09:57 +02002701 ret = state_next();
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002702 }
2703
2704 return ret;
2705}
2706
Joerg Roedel6b474b82012-06-26 16:46:04 +02002707#ifdef CONFIG_IRQ_REMAP
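/*
 * The functions below are this driver's interrupt remapping entry points.
 * They are expected to be invoked by the x86 interrupt remapping core
 * (through the driver's irq_remap_ops callbacks) rather than called
 * directly.
 */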
2708int __init amd_iommu_prepare(void)
2709{
Thomas Gleixner3f4cb7c2015-01-23 14:32:46 +01002710 int ret;
2711
Jiang Liu7fa1c842015-01-07 15:31:42 +08002712 amd_iommu_irq_remap = true;
Joerg Roedel84d07792015-01-07 15:31:39 +08002713
Thomas Gleixner3f4cb7c2015-01-23 14:32:46 +01002714 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2715 if (ret)
2716 return ret;
2717 return amd_iommu_irq_remap ? 0 : -ENODEV;
Joerg Roedel6b474b82012-06-26 16:46:04 +02002718}
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002719
Joerg Roedel6b474b82012-06-26 16:46:04 +02002720int __init amd_iommu_enable(void)
2721{
2722 int ret;
2723
2724 ret = iommu_go_to_state(IOMMU_ENABLED);
2725 if (ret)
2726 return ret;
2727
2728 irq_remapping_enabled = 1;
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05002729 return amd_iommu_xt_mode;
Joerg Roedel6b474b82012-06-26 16:46:04 +02002730}
2731
2732void amd_iommu_disable(void)
2733{
2734 amd_iommu_suspend();
2735}
2736
2737int amd_iommu_reenable(int mode)
2738{
2739 amd_iommu_resume();
2740
2741 return 0;
2742}
2743
2744int __init amd_iommu_enable_faulting(void)
2745{
2746 /* We enable MSI later when PCI is initialized */
2747 return 0;
2748}
2749#endif
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002750
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002751/*
2752 * This is the core init function for AMD IOMMU hardware in the system.
2753 * This function is called from the generic x86 DMA layer initialization
2754 * code.
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002755 */
2756static int __init amd_iommu_init(void)
2757{
Gary R Hook7d0f5fd2018-06-12 16:41:30 -05002758 struct amd_iommu *iommu;
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002759 int ret;
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002760
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002761 ret = iommu_go_to_state(IOMMU_INITIALIZED);
2762 if (ret) {
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002763 free_dma_resources();
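		/*
		 * With interrupt remapping already enabled the IOMMUs have to
		 * stay enabled (interrupts are delivered through them), so
		 * only the DMA related state is torn down in that case.
		 */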
2764 if (!irq_remapping_enabled) {
2765 disable_iommus();
Joerg Roedel90b3eb02017-06-16 16:09:55 +02002766 free_iommu_resources();
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002767 } else {
Joerg Roedeld04e0ba2012-07-02 16:02:20 +02002768 uninit_device_table_dma();
2769 for_each_iommu(iommu)
2770 iommu_flush_all_caches(iommu);
2771 }
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002772 }
Joerg Roedel8704a1b2012-03-01 15:57:53 +01002773
Gary R Hook7d0f5fd2018-06-12 16:41:30 -05002774 for_each_iommu(iommu)
2775 amd_iommu_debugfs_setup(iommu);
2776
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002777 return ret;
Joerg Roedelfe74c9c2008-06-26 21:27:50 +02002778}
2779
Tom Lendacky2543a782017-07-17 16:10:24 -05002780static bool amd_iommu_sme_check(void)
2781{
2782 if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2783 return true;
2784
2785	/* For Fam17h, a specific level of microcode support is required */
2786 if (boot_cpu_data.microcode >= 0x08001205)
2787 return true;
2788
2789 if ((boot_cpu_data.microcode >= 0x08001126) &&
2790 (boot_cpu_data.microcode <= 0x080011ff))
2791 return true;
2792
Joerg Roedel101fa032018-11-27 16:22:31 +01002793 pr_notice("IOMMU not currently supported when SME is active\n");
Tom Lendacky2543a782017-07-17 16:10:24 -05002794
2795 return false;
2796}
2797
Joerg Roedelb65233a2008-07-11 17:14:21 +02002798/****************************************************************************
2799 *
2800 * Early detect code. This code runs at IOMMU detection time in the DMA
2801 * layer. It simply checks whether an IVRS ACPI table is present in order
2802 * to detect AMD IOMMUs
2803 *
2804 ****************************************************************************/
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -04002805int __init amd_iommu_detect(void)
Joerg Roedelae7877d2008-06-26 21:27:51 +02002806{
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002807 int ret;
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002808
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09002809 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -04002810 return -ENODEV;
Joerg Roedelae7877d2008-06-26 21:27:51 +02002811
Tom Lendacky2543a782017-07-17 16:10:24 -05002812 if (!amd_iommu_sme_check())
2813 return -ENODEV;
2814
Joerg Roedel2c0ae172012-06-12 15:59:30 +02002815 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2816 if (ret)
2817 return ret;
Linus Torvalds11bd04f2009-12-11 12:18:16 -08002818
Joerg Roedel02f3b3f2012-06-11 17:45:25 +02002819 amd_iommu_detected = true;
2820 iommu_detected = 1;
2821 x86_init.iommu.iommu_init = amd_iommu_init;
2822
Jérôme Glisse4781bc42015-08-31 18:13:03 -04002823 return 1;
Joerg Roedelae7877d2008-06-26 21:27:51 +02002824}
2825
Joerg Roedelb65233a2008-07-11 17:14:21 +02002826/****************************************************************************
2827 *
2828 * Parsing functions for the AMD IOMMU specific kernel command line
2829 * options.
2830 *
2831 ****************************************************************************/
2832
Joerg Roedelfefda112009-05-20 12:21:42 +02002833static int __init parse_amd_iommu_dump(char *str)
2834{
2835 amd_iommu_dump = true;
2836
2837 return 1;
2838}
2839
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002840static int __init parse_amd_iommu_intr(char *str)
2841{
2842 for (; *str; ++str) {
2843 if (strncmp(str, "legacy", 6) == 0) {
2844 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2845 break;
2846 }
2847 if (strncmp(str, "vapic", 5) == 0) {
2848 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2849 break;
2850 }
2851 }
2852 return 1;
2853}
2854
Joerg Roedel918ad6c2008-06-26 21:27:52 +02002855static int __init parse_amd_iommu_options(char *str)
2856{
2857 for (; *str; ++str) {
Joerg Roedel695b5672008-11-17 15:16:43 +01002858 if (strncmp(str, "fullflush", 9) == 0)
FUJITA Tomonoriafa9fdc2008-09-20 01:23:30 +09002859 amd_iommu_unmap_flush = true;
Joerg Roedela5235722010-05-11 17:12:33 +02002860 if (strncmp(str, "off", 3) == 0)
2861 amd_iommu_disabled = true;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002862 if (strncmp(str, "force_isolation", 15) == 0)
2863 amd_iommu_force_isolation = true;
Joerg Roedel918ad6c2008-06-26 21:27:52 +02002864 }
2865
2866 return 1;
2867}
2868
Joerg Roedel440e89982013-04-09 16:35:28 +02002869static int __init parse_ivrs_ioapic(char *str)
2870{
2871 unsigned int bus, dev, fn;
2872 int ret, id, i;
2873 u16 devid;
2874
2875 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2876
2877 if (ret != 4) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002878 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
Joerg Roedel440e89982013-04-09 16:35:28 +02002879 return 1;
2880 }
2881
2882 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002883 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
Joerg Roedel440e89982013-04-09 16:35:28 +02002884 str);
2885 return 1;
2886 }
2887
2888 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2889
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002890 cmdline_maps = true;
Joerg Roedel440e89982013-04-09 16:35:28 +02002891 i = early_ioapic_map_size++;
2892 early_ioapic_map[i].id = id;
2893 early_ioapic_map[i].devid = devid;
2894 early_ioapic_map[i].cmd_line = true;
2895
2896 return 1;
2897}
2898
2899static int __init parse_ivrs_hpet(char *str)
2900{
2901 unsigned int bus, dev, fn;
2902 int ret, id, i;
2903 u16 devid;
2904
2905 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2906
2907 if (ret != 4) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002908 pr_err("Invalid command line: ivrs_hpet%s\n", str);
Joerg Roedel440e89982013-04-09 16:35:28 +02002909 return 1;
2910 }
2911
2912 if (early_hpet_map_size == EARLY_MAP_SIZE) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002913 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
Joerg Roedel440e89982013-04-09 16:35:28 +02002914 str);
2915 return 1;
2916 }
2917
2918 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2919
Joerg Roedeldfbb6d42013-04-09 19:06:18 +02002920 cmdline_maps = true;
Joerg Roedel440e89982013-04-09 16:35:28 +02002921 i = early_hpet_map_size++;
2922 early_hpet_map[i].id = id;
2923 early_hpet_map[i].devid = devid;
2924 early_hpet_map[i].cmd_line = true;
2925
2926 return 1;
2927}
2928
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04002929static int __init parse_ivrs_acpihid(char *str)
2930{
2931 u32 bus, dev, fn;
2932 char *hid, *uid, *p;
2933 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
2934 int ret, i;
2935
2936 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
2937 if (ret != 4) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002938 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04002939 return 1;
2940 }
2941
2942 p = acpiid;
2943 hid = strsep(&p, ":");
2944 uid = p;
2945
2946 if (!hid || !(*hid) || !uid) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002947 pr_err("Invalid command line: hid or uid\n");
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04002948 return 1;
2949 }
2950
2951 i = early_acpihid_map_size++;
2952 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
2953 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
2954 early_acpihid_map[i].devid =
2955 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2956 early_acpihid_map[i].cmd_line = true;
2957
2958 return 1;
2959}
2960
Joerg Roedel440e89982013-04-09 16:35:28 +02002961__setup("amd_iommu_dump", parse_amd_iommu_dump);
2962__setup("amd_iommu=", parse_amd_iommu_options);
Suravee Suthikulpanit3928aa32016-08-23 13:52:32 -05002963__setup("amd_iommu_intr=", parse_amd_iommu_intr);
Joerg Roedel440e89982013-04-09 16:35:28 +02002964__setup("ivrs_ioapic", parse_ivrs_ioapic);
2965__setup("ivrs_hpet", parse_ivrs_hpet);
Suravee Suthikulpanitca3bf5d2016-04-01 09:06:01 -04002966__setup("ivrs_acpihid", parse_ivrs_acpihid);
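
/*
 * Illustrative command-line usage for the options registered above (the
 * syntax follows the sscanf() formats in the parse functions; the IDs and
 * PCI bus:dev.fn values below are made up):
 *
 *   amd_iommu_dump                    set amd_iommu_dump
 *   amd_iommu=fullflush               set amd_iommu_unmap_flush
 *   amd_iommu=off                     set amd_iommu_disabled
 *   amd_iommu=force_isolation         set amd_iommu_force_isolation
 *   amd_iommu_intr=legacy             select AMD_IOMMU_GUEST_IR_LEGACY
 *   amd_iommu_intr=vapic              select AMD_IOMMU_GUEST_IR_VAPIC
 *   ivrs_ioapic[10]=00:14.0           map IOAPIC id 10 to devid 00:14.0
 *   ivrs_hpet[0]=00:14.0              map HPET id 0 to devid 00:14.0
 *   ivrs_acpihid[30:1.0]=AMD0020:0    map HID AMD0020, UID 0 to devid 30:1.0
 */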
Konrad Rzeszutek Wilk22e6daf2010-08-26 13:58:03 -04002967
2968IOMMU_INIT_FINISH(amd_iommu_detect,
2969 gart_iommu_hole_init,
Joerg Roedel98f1ad22012-07-06 13:28:37 +02002970 NULL,
2971 NULL);
Joerg Roedel400a28a2011-11-28 15:11:02 +01002972
2973bool amd_iommu_v2_supported(void)
2974{
2975 return amd_iommu_v2_present;
2976}
2977EXPORT_SYMBOL(amd_iommu_v2_supported);
Steven L Kinney30861dd2013-06-05 16:11:48 -05002978
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002979struct amd_iommu *get_amd_iommu(unsigned int idx)
2980{
2981 unsigned int i = 0;
2982 struct amd_iommu *iommu;
2983
2984 for_each_iommu(iommu)
2985 if (i++ == idx)
2986 return iommu;
2987 return NULL;
2988}
2989EXPORT_SYMBOL(get_amd_iommu);
2990
Steven L Kinney30861dd2013-06-05 16:11:48 -05002991/****************************************************************************
2992 *
2993 * IOMMU EFR Performance Counter support. This code allows access to the
2994 * IOMMU performance counter (PC) registers.
2995 *
2996 ****************************************************************************/
2997
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06002998u8 amd_iommu_pc_get_max_banks(unsigned int idx)
Steven L Kinney30861dd2013-06-05 16:11:48 -05002999{
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003000 struct amd_iommu *iommu = get_amd_iommu(idx);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003001
Steven L Kinney30861dd2013-06-05 16:11:48 -05003002 if (iommu)
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003003 return iommu->max_banks;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003004
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003005 return 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003006}
3007EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3008
3009bool amd_iommu_pc_supported(void)
3010{
3011 return amd_iommu_pc_present;
3012}
3013EXPORT_SYMBOL(amd_iommu_pc_supported);
3014
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003015u8 amd_iommu_pc_get_max_counters(unsigned int idx)
Steven L Kinney30861dd2013-06-05 16:11:48 -05003016{
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003017 struct amd_iommu *iommu = get_amd_iommu(idx);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003018
Steven L Kinney30861dd2013-06-05 16:11:48 -05003019 if (iommu)
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003020 return iommu->max_counters;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003021
Suravee Suthikulpanitf5863a02017-02-24 02:48:18 -06003022 return 0;
Steven L Kinney30861dd2013-06-05 16:11:48 -05003023}
3024EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3025
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003026static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3027 u8 fxn, u64 *value, bool is_write)
Steven L Kinney30861dd2013-06-05 16:11:48 -05003028{
Steven L Kinney30861dd2013-06-05 16:11:48 -05003029 u32 offset;
3030 u32 max_offset_lim;
3031
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003032 /* Make sure the IOMMU PC resource is available */
3033 if (!amd_iommu_pc_present)
3034 return -ENODEV;
3035
Steven L Kinney30861dd2013-06-05 16:11:48 -05003036 /* Check for valid iommu and pc register indexing */
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003037 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
Steven L Kinney30861dd2013-06-05 16:11:48 -05003038 return -ENODEV;
3039
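	/*
	 * Counter registers live at MMIO offset 0x40000 and up: each bank
	 * spans 0x1000 bytes, each counter within a bank spans 0x100 bytes,
	 * and fxn selects one of the 64-bit registers of that counter
	 * (0x00-0x28, 8-byte aligned, as checked above).
	 */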
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003040 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003041
3042 /* Limit the offset to the hw defined mmio region aperture */
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003043 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
Steven L Kinney30861dd2013-06-05 16:11:48 -05003044 (iommu->max_counters << 8) | 0x28);
3045 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3046 (offset > max_offset_lim))
3047 return -EINVAL;
3048
3049 if (is_write) {
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003050 u64 val = *value & GENMASK_ULL(47, 0);
3051
3052 writel((u32)val, iommu->mmio_base + offset);
3053 writel((val >> 32), iommu->mmio_base + offset + 4);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003054 } else {
3055 *value = readl(iommu->mmio_base + offset + 4);
3056 *value <<= 32;
Suravee Suthikulpanit0a6d80c2017-02-24 02:48:16 -06003057 *value |= readl(iommu->mmio_base + offset);
3058 *value &= GENMASK_ULL(47, 0);
Steven L Kinney30861dd2013-06-05 16:11:48 -05003059 }
3060
3061 return 0;
3062}
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003063
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003064int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003065{
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003066 if (!iommu)
3067 return -EINVAL;
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003068
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003069 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
Suravee Suthikulpanit38e45d02016-02-23 13:03:30 +01003070}
Suravee Suthikulpanit1650dfd2017-02-24 02:48:19 -06003071EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3072
3073int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3074{
3075 if (!iommu)
3076 return -EINVAL;
3077
3078 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3079}
3080EXPORT_SYMBOL(amd_iommu_pc_set_reg);
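
/*
 * Minimal usage sketch (not part of the driver): how an in-kernel consumer,
 * e.g. a perf PMU driver, might read a performance counter register through
 * the interface exported above.  The fxn value 0x00 is only an illustrative
 * register offset; real callers pass the offsets defined for their use case.
 *
 *	u64 value;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && amd_iommu_pc_supported() &&
 *	    amd_iommu_pc_get_max_banks(0) > 0 &&
 *	    !amd_iommu_pc_get_reg(iommu, 0, 0, 0x00, &value))
 *		pr_debug("IOMMU 0, bank 0, counter 0: %#llx\n", value);
 */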