// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL			GENMASK(28, 27)
#define IDR0_ST_LVL_2LVL		1
#define IDR0_STALL_MODEL		GENMASK(25, 24)
#define IDR0_STALL_MODEL_STALL		0
#define IDR0_STALL_MODEL_FORCE		2
#define IDR0_TTENDIAN			GENMASK(22, 21)
#define IDR0_TTENDIAN_MIXED		0
#define IDR0_TTENDIAN_LE		2
#define IDR0_TTENDIAN_BE		3
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF			GENMASK(3, 2)
#define IDR0_TTF_AARCH64		2
#define IDR0_TTF_AARCH32_64		3
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQS			GENMASK(25, 21)
#define IDR1_EVTQS			GENMASK(20, 16)
#define IDR1_PRIQS			GENMASK(15, 11)
#define IDR1_SSIDSIZE			GENMASK(10, 6)
#define IDR1_SIDSIZE			GENMASK(5, 0)

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX			GENMASK(31, 16)
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS			GENMASK(2, 0)
#define IDR5_OAS_32_BIT			0
#define IDR5_OAS_36_BIT			1
#define IDR5_OAS_40_BIT			2
#define IDR5_OAS_42_BIT			3
#define IDR5_OAS_44_BIT			4
#define IDR5_OAS_48_BIT			5
#define IDR5_OAS_52_BIT			6
#define IDR5_VAX			GENMASK(11, 10)
#define IDR5_VAX_52_BIT			1

#define ARM_SMMU_CR0			0x20
#define CR0_ATSCHK			(1 << 4)
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_TABLE_SH			GENMASK(11, 10)
#define CR1_TABLE_OC			GENMASK(9, 8)
#define CR1_TABLE_IC			GENMASK(7, 6)
#define CR1_QUEUE_SH			GENMASK(5, 4)
#define CR1_QUEUE_OC			GENMASK(3, 2)
#define CR1_QUEUE_IC			GENMASK(1, 0)
/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_UPDATE			(1 << 31)
#define GBPA_ABORT			(1 << 20)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_MASK		GENMASK_ULL(51, 6)

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_FMT		GENMASK(17, 16)
#define STRTAB_BASE_CFG_FMT_LINEAR	0
#define STRTAB_BASE_CFG_FMT_2LVL	1
#define STRTAB_BASE_CFG_SPLIT		GENMASK(10, 6)
#define STRTAB_BASE_CFG_LOG2SIZE	GENMASK(5, 0)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_MASK		GENMASK_ULL(51, 2)
#define MSI_CFG2_SH			GENMASK(5, 4)
#define MSI_CFG2_MEMATTR		GENMASK(3, 0)

/* Common memory attribute values */
#define ARM_SMMU_SH_NSH			0
#define ARM_SMMU_SH_OSH			2
#define ARM_SMMU_SH_ISH			3
#define ARM_SMMU_MEMATTR_DEVICE_nGnRE	0x1
#define ARM_SMMU_MEMATTR_OIWB		0xf

#define Q_IDX(llq, p)			((p) & ((1 << (llq)->max_n_shift) - 1))
#define Q_WRP(llq, p)			((p) & (1 << (llq)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1U << 31)
#define Q_OVF(p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(&((q)->llq), p) *	\
					 (q)->ent_dwords)

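/*
 * Illustrative example (editor's sketch, not part of the upstream file):
 * prod/cons values carry an index in bits [max_n_shift-1:0], a wrap flag
 * in bit max_n_shift and an overflow flag in bit 31. For a queue with
 * llq->max_n_shift == 8:
 *
 *	prod = 0x105, cons = 0x005: the indices match (0x05) but the wrap
 *	flags differ, so the producer has lapped the consumer and the
 *	queue is full;
 *
 *	prod = 0x105, cons = 0x105: indices and wrap flags both match,
 *	so the queue is empty.
 *
 * queue_full() and queue_empty() below encode exactly these two checks.
 */
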
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_MASK		GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE			GENMASK(4, 0)

/* Ensure DMA allocations are naturally aligned */
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + MAX_ORDER - 1)
#endif
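
/*
 * Illustrative arithmetic (editor's sketch, assuming 4K pages and the
 * usual CONFIG_CMA_ALIGNMENT default of 8): Q_MAX_SZ_SHIFT = 12 + 8 = 20,
 * capping any single queue allocation at 1MiB. With 16-byte command queue
 * entries (CMDQ_ENT_SZ_SHIFT == 4, below) that allows up to 2^16 entries.
 */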

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN		GENMASK_ULL(4, 0)
#define STRTAB_L1_DESC_L2PTR_MASK	GENMASK_ULL(51, 6)
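
/*
 * Illustrative example (editor's sketch, not part of the upstream file):
 * with STRTAB_SPLIT == 8, a StreamID indexes the 2-level table as:
 *
 *	l1_idx = sid >> STRTAB_SPLIT;             // selects the L1 descriptor
 *	l2_idx = sid & ((1 << STRTAB_SPLIT) - 1); // selects the STE in the
 *						  // lazily-allocated L2 table
 *
 * so each L2 table holds 256 STEs, matching the devfn portion of a PCI
 * requester ID; hence one table per PCI bus, as the comment above notes.
 */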

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG		GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT		0
#define STRTAB_STE_0_CFG_BYPASS		4
#define STRTAB_STE_0_CFG_S1_TRANS	5
#define STRTAB_STE_0_CFG_S2_TRANS	6

#define STRTAB_STE_0_S1FMT		GENMASK_ULL(5, 4)
#define STRTAB_STE_0_S1FMT_LINEAR	0
#define STRTAB_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
#define STRTAB_STE_0_S1CDMAX		GENMASK_ULL(63, 59)

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1CIR		GENMASK_ULL(3, 2)
#define STRTAB_STE_1_S1COR		GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH		GENMASK_ULL(7, 6)

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS		GENMASK_ULL(29, 28)
#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL

#define STRTAB_STE_1_STRW		GENMASK_ULL(31, 30)
#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL

#define STRTAB_STE_1_SHCFG		GENMASK_ULL(45, 44)
#define STRTAB_STE_1_SHCFG_INCOMING	1UL

#define STRTAB_STE_2_S2VMID		GENMASK_ULL(15, 0)
#define STRTAB_STE_2_VTCR		GENMASK_ULL(50, 32)
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_MASK		GENMASK_ULL(51, 4)

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
#define ARM64_TCR_T0SZ			GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
#define ARM64_TCR_TG0			GENMASK_ULL(15, 14)
#define CTXDESC_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
#define ARM64_TCR_IRGN0			GENMASK_ULL(9, 8)
#define CTXDESC_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
#define ARM64_TCR_ORGN0			GENMASK_ULL(11, 10)
#define CTXDESC_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
#define ARM64_TCR_SH0			GENMASK_ULL(13, 12)
#define CTXDESC_CD_0_TCR_EPD0		(1ULL << 14)
#define ARM64_TCR_EPD0			(1ULL << 7)
#define CTXDESC_CD_0_TCR_EPD1		(1ULL << 30)
#define ARM64_TCR_EPD1			(1ULL << 23)

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
#define ARM64_TCR_IPS			GENMASK_ULL(34, 32)
#define CTXDESC_CD_0_TCR_TBI0		(1ULL << 38)
#define ARM64_TCR_TBI0			(1ULL << 37)

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_S			(1UL << 44)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET		(1UL << 47)
#define CTXDESC_CD_0_ASID		GENMASK_ULL(63, 48)

#define CTXDESC_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)	FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
					FIELD_GET(ARM64_TCR_##fld, tcr))
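
/*
 * Illustrative usage (editor's sketch, not part of the upstream file):
 * some fields sit at different bit positions in the CPU's TCR and in the
 * CD (e.g. TG0 is at bits [15:14] in the TCR but [7:6] in the CD, per the
 * definitions above), so each field is extracted with FIELD_GET() and
 * re-packed with FIELD_PREP():
 *
 *	u64 val = ARM_SMMU_TCR2CD(tcr, T0SZ) |
 *		  ARM_SMMU_TCR2CD(tcr, TG0)  |
 *		  ARM_SMMU_TCR2CD(tcr, IRGN0);
 */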

/* Command queue */
#define CMDQ_ENT_SZ_SHIFT		4
#define CMDQ_ENT_DWORDS			((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
#define CMDQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)

#define CMDQ_CONS_ERR			GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2
#define CMDQ_ERR_CERROR_ATC_INV_IDX	3

#define CMDQ_PROD_OWNED_FLAG		Q_OVERFLOW_FLAG

/*
 * This is used to size the command queue and therefore must be at least
 * BITS_PER_LONG so that the valid_map works correctly (it relies on the
 * total number of queue entries being a multiple of BITS_PER_LONG).
 */
#define CMDQ_BATCH_ENTRIES		BITS_PER_LONG

#define CMDQ_0_OP			GENMASK_ULL(7, 0)
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID		GENMASK_ULL(63, 32)
#define CMDQ_PREFETCH_1_SIZE		GENMASK_ULL(4, 0)
#define CMDQ_PREFETCH_1_ADDR_MASK	GENMASK_ULL(63, 12)

#define CMDQ_CFGI_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE		GENMASK_ULL(4, 0)

#define CMDQ_TLBI_0_VMID		GENMASK_ULL(47, 32)
#define CMDQ_TLBI_0_ASID		GENMASK_ULL(63, 48)
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK		GENMASK_ULL(51, 12)

#define CMDQ_ATC_0_SSID			GENMASK_ULL(31, 12)
#define CMDQ_ATC_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_ATC_0_GLOBAL		(1UL << 9)
#define CMDQ_ATC_1_SIZE			GENMASK_ULL(5, 0)
#define CMDQ_ATC_1_ADDR_MASK		GENMASK_ULL(63, 12)

#define CMDQ_PRI_0_SSID			GENMASK_ULL(31, 12)
#define CMDQ_PRI_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_PRI_1_GRPID		GENMASK_ULL(8, 0)
#define CMDQ_PRI_1_RESP			GENMASK_ULL(13, 12)

#define CMDQ_SYNC_0_CS			GENMASK_ULL(13, 12)
#define CMDQ_SYNC_0_CS_NONE		0
#define CMDQ_SYNC_0_CS_IRQ		1
#define CMDQ_SYNC_0_CS_SEV		2
#define CMDQ_SYNC_0_MSH			GENMASK_ULL(23, 22)
#define CMDQ_SYNC_0_MSIATTR		GENMASK_ULL(27, 24)
#define CMDQ_SYNC_0_MSIDATA		GENMASK_ULL(63, 32)
#define CMDQ_SYNC_1_MSIADDR_MASK	GENMASK_ULL(51, 2)

/* Event queue */
#define EVTQ_ENT_SZ_SHIFT		5
#define EVTQ_ENT_DWORDS			((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
#define EVTQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)

#define EVTQ_0_ID			GENMASK_ULL(7, 0)

/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT		4
#define PRIQ_ENT_DWORDS			((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
#define PRIQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)

#define PRIQ_0_SID			GENMASK_ULL(31, 0)
#define PRIQ_0_SSID			GENMASK_ULL(51, 32)
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX			GENMASK_ULL(8, 0)
#define PRIQ_1_ADDR_MASK		GENMASK_ULL(63, 12)

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	1000000 /* 1s! */
#define ARM_SMMU_POLL_SPIN_COUNT	10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static bool disable_bypass = true;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY = 0,
	PRI_RESP_FAIL = 1,
	PRI_RESP_SUCC = 2,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_ATC_INV		0x40
		#define ATC_INV_SIZE_ALL	52
		struct {
			u32			sid;
			u32			ssid;
			u64			addr;
			u8			size;
			bool			global;
		} atc;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
		struct {
			u64			msiaddr;
		} sync;
	};
};

struct arm_smmu_ll_queue {
	union {
		u64			val;
		struct {
			u32		prod;
			u32		cons;
		};
		struct {
			atomic_t	prod;
			atomic_t	cons;
		} atomic;
		u8			__pad[SMP_CACHE_BYTES];
	} ____cacheline_aligned_in_smp;
	u32				max_n_shift;
};

struct arm_smmu_queue {
	struct arm_smmu_ll_queue	llq;
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_queue_poll {
	ktime_t				timeout;
	unsigned int			delay;
	unsigned int			spin_cnt;
	bool				wfe;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	atomic_long_t			*valid_map;
	atomic_t			owner_prod;
	atomic_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	} cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
#define ARM_SMMU_FEAT_VAX		(1 << 14)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY	(1 << 1)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;
	int				combined_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

/* SMMU private data for each master */
struct arm_smmu_master {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct arm_smmu_domain		*domain;
	struct list_head		domain_head;
	u32				*sids;
	unsigned int			num_sids;
	bool				ats_enabled;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	bool				non_strict;
	atomic_t			nr_ats_masters;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;

	struct list_head		devices;
	spinlock_t			devices_lock;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
	{ 0, NULL},
};

static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
						 struct arm_smmu_device *smmu)
{
	if ((offset > SZ_64K) &&
	    (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
		offset -= SZ_64K;

	return smmu->base + offset;
}
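
/*
 * Illustrative example (editor's sketch, not part of the upstream file):
 * registers such as ARM_SMMU_EVTQ_PROD (0x100a8) normally live in SMMU
 * page 1, 64K above page 0. On parts with the cn9900 erratum above, an
 * access like
 *
 *	writel(prod, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
 *
 * has 64K subtracted from the offset, landing at 0xa8 within page 0.
 */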

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
{
	u32 space, prod, cons;

	prod = Q_IDX(q, q->prod);
	cons = Q_IDX(q, q->cons);

	if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
		space = (1 << q->max_n_shift) - (prod - cons);
	else
		space = cons - prod;

	return space >= n;
}

static bool queue_full(struct arm_smmu_ll_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_ll_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
{
	return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
		(Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
	       ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
		(Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
}
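
/*
 * Illustrative example (editor's sketch, not part of the upstream file):
 * queue_consumed() answers "has the hardware cons pointer moved past this
 * prod value?". With max_n_shift == 8, a command at prod == 0x0fe (wrap 0,
 * index 0xfe) is consumed once cons == 0x102 (wrap 1, index 0x02): the
 * wrap flags differ and 0x02 <= 0xfe, satisfying the second clause. A
 * cons of 0x0f0 (same wrap, index 0xf0) satisfies neither clause, so the
 * command is still pending.
 */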

static void queue_sync_cons_out(struct arm_smmu_queue *q)
{
	/*
	 * Ensure that all CPU accesses (reads and writes) to the queue
	 * are complete before we update the cons pointer.
	 */
	mb();
	writel_relaxed(q->llq.cons, q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_ll_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
	q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
}

static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(prod) != Q_OVF(q->llq.prod))
		ret = -EOVERFLOW;

	q->llq.prod = prod;
	return ret;
}

static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
	return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
}

static void queue_poll_init(struct arm_smmu_device *smmu,
			    struct arm_smmu_queue_poll *qp)
{
	qp->delay = 1;
	qp->spin_cnt = 0;
	qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
}

static int queue_poll(struct arm_smmu_queue_poll *qp)
{
	if (ktime_compare(ktime_get(), qp->timeout) > 0)
		return -ETIMEDOUT;

	if (qp->wfe) {
		wfe();
	} else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) {
		cpu_relax();
	} else {
		udelay(qp->delay);
		qp->delay *= 2;
		qp->spin_cnt = 0;
	}

	return 0;
}
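
/*
 * Illustrative usage (editor's sketch; condition_met() is hypothetical):
 *
 *	struct arm_smmu_queue_poll qp;
 *
 *	queue_poll_init(smmu, &qp);
 *	while (!condition_met()) {
 *		if (queue_poll(&qp))
 *			return -ETIMEDOUT;	// 1s budget exhausted
 *	}
 *
 * With SEV support the wait is a wfe(); otherwise the caller spins up to
 * ARM_SMMU_POLL_SPIN_COUNT times and then backs off with an exponentially
 * growing udelay().
 */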

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static void queue_read(u64 *dst, __le64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(&q->llq))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
	queue_inc_cons(&q->llq);
	queue_sync_cons_out(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
	cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
		cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		break;
	case CMDQ_OP_ATC_INV:
		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
		cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
		cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
		case PRI_RESP_FAIL:
		case PRI_RESP_SUCC:
			break;
		default:
			return -EINVAL;
		}
		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
		break;
	case CMDQ_OP_CMD_SYNC:
		if (ent->sync.msiaddr) {
			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
			cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
		} else {
			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
		}
		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
					 u32 prod)
{
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	struct arm_smmu_cmdq_ent ent = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	/*
	 * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
	 * payload, so the write will zero the entire command on that platform.
	 */
	if (smmu->features & ARM_SMMU_FEAT_MSI &&
	    smmu->features & ARM_SMMU_FEAT_COHERENCY) {
		ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
				   q->ent_dwords * 8;
	}

	arm_smmu_cmdq_build_cmd(cmd, &ent);
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
		[CMDQ_ERR_CERROR_ATC_INV_IDX]	= "ATC invalidate timeout",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
		/* Fallthrough */
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 957 | case CMDQ_ERR_CERROR_ATC_INV_IDX: |
| 958 | /* |
| 959 | * ATC Invalidation Completion timeout. CONS is still pointing |
| 960 | * at the CMD_SYNC. Attempt to complete other pending commands |
| 961 | * by repeating the CMD_SYNC, though we might well end up back |
| 962 | * here since the ATC invalidation may still be pending. |
| 963 | */ |
| 964 | return; |
Will Deacon | a0d5c04 | 2015-12-04 12:00:29 +0000 | [diff] [blame] | 965 | case CMDQ_ERR_CERROR_ILL_IDX: |
| 966 | /* Fallthrough */ |
| 967 | default: |
| 968 | break; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 969 | } |
| 970 | |
| 971 | /* |
| 972 | * We may have concurrent producers, so we need to be careful |
| 973 | * not to touch any of the shadow cmdq state. |
| 974 | */ |
Will Deacon | aea2037 | 2016-07-29 11:15:37 +0100 | [diff] [blame] | 975 | queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 976 | dev_err(smmu->dev, "skipping command in error state:\n"); |
| 977 | for (i = 0; i < ARRAY_SIZE(cmd); ++i) |
| 978 | dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); |
| 979 | |
| 980 | /* Convert the erroneous command into a CMD_SYNC */ |
| 981 | if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) { |
| 982 | dev_err(smmu->dev, "failed to convert to CMD_SYNC\n"); |
| 983 | return; |
| 984 | } |
| 985 | |
Will Deacon | aea2037 | 2016-07-29 11:15:37 +0100 | [diff] [blame] | 986 | queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 987 | } |
| 988 | |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 989 | /* |
| 990 | * Command queue locking. |
| 991 | * This is a form of bastardised rwlock with the following major changes: |
| 992 | * |
| 993 | * - The only LOCK routines are exclusive_trylock() and shared_lock(). |
| 994 | * Neither have barrier semantics, and instead provide only a control |
| 995 | * dependency. |
| 996 | * |
| 997 | * - The UNLOCK routines are supplemented with shared_tryunlock(), which |
| 998 | * fails if the caller appears to be the last lock holder (yes, this is |
| 999 | * racy). All successful UNLOCK routines have RELEASE semantics. |
| 1000 | */ |
| 1001 | static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) |
Robin Murphy | 2f657ad | 2017-08-31 14:44:25 +0100 | [diff] [blame] | 1002 | { |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1003 | int val; |
Robin Murphy | 2f657ad | 2017-08-31 14:44:25 +0100 | [diff] [blame] | 1004 | |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1005 | /* |
| 1006 | * We can try to avoid the cmpxchg() loop by simply incrementing the |
| 1007 | * lock counter. When held in exclusive state, the lock counter is set |
| 1008 | * to INT_MIN so these increments won't hurt as the value will remain |
| 1009 | * negative. |
| 1010 | */ |
| 1011 | if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) |
| 1012 | return; |
Zhen Lei | 901510e | 2018-08-19 15:51:11 +0800 | [diff] [blame] | 1013 | |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1014 | do { |
| 1015 | val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); |
| 1016 | } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); |
| 1017 | } |
| 1018 | |
| 1019 | static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) |
| 1020 | { |
| 1021 | (void)atomic_dec_return_release(&cmdq->lock); |
| 1022 | } |
| 1023 | |
| 1024 | static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) |
| 1025 | { |
| 1026 | if (atomic_read(&cmdq->lock) == 1) |
| 1027 | return false; |
| 1028 | |
| 1029 | arm_smmu_cmdq_shared_unlock(cmdq); |
| 1030 | return true; |
| 1031 | } |
| 1032 | |
| 1033 | #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ |
| 1034 | ({ \ |
| 1035 | bool __ret; \ |
| 1036 | local_irq_save(flags); \ |
| 1037 | __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \ |
| 1038 | if (!__ret) \ |
| 1039 | local_irq_restore(flags); \ |
| 1040 | __ret; \ |
| 1041 | }) |
| 1042 | |
| 1043 | #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ |
| 1044 | ({ \ |
| 1045 | atomic_set_release(&cmdq->lock, 0); \ |
| 1046 | local_irq_restore(flags); \ |
| 1047 | }) |
| 1048 | |
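/*
 * Editor's sketch (not part of the driver): roughly how the primitives
 * above pair up, modelled on the CMD_SYNC completion path later in this
 * file. The function name is hypothetical, for illustration only.
 */
static void __maybe_unused arm_smmu_cmdq_lock_pairing_sketch(struct arm_smmu_cmdq *cmdq)
{
	unsigned long flags;

	/* Reader side: nestable, and cheap when uncontended. */
	arm_smmu_cmdq_shared_lock(cmdq);
	/* ... spin here until our CMD_SYNC is consumed ... */
	if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
		/*
		 * tryunlock() failed, so we appear to be the last holder:
		 * it is now safe to publish an updated cons snapshot
		 * before dropping the lock for real.
		 */
		arm_smmu_cmdq_shared_unlock(cmdq);
	}

	/* Writer side: all-or-nothing, excludes every shared holder. */
	if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags))
		arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
}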
| 1049 | |
| 1050 | /* |
| 1051 | * Command queue insertion. |
| 1052 | * This is made fiddly by our attempts to achieve some sort of scalability |
| 1053 | * since there is one queue shared amongst all of the CPUs in the system. If |
| 1054 | * you like mixed-size concurrency, dependency ordering and relaxed atomics, |
| 1055 | * then you'll *love* this monstrosity. |
| 1056 | * |
| 1057 | * The basic idea is to split the queue up into ranges of commands that are |
| 1058 | * owned by a given CPU; the owner may not have written all of the commands |
| 1059 | * itself, but is responsible for advancing the hardware prod pointer when |
| 1060 | * the time comes. The algorithm is roughly: |
| 1061 | * |
| 1062 | * 1. Allocate some space in the queue. At this point we also discover |
| 1063 | * whether the head of the queue is currently owned by another CPU, |
| 1064 | * or whether we are the owner. |
| 1065 | * |
| 1066 | * 2. Write our commands into our allocated slots in the queue. |
| 1067 | * |
| 1068 | * 3. Mark our slots as valid in arm_smmu_cmdq.valid_map. |
| 1069 | * |
| 1070 | * 4. If we are an owner: |
| 1071 | * a. Wait for the previous owner to finish. |
| 1072 | * b. Mark the queue head as unowned, which tells us the range |
| 1073 | * that we are responsible for publishing. |
| 1074 | * c. Wait for all commands in our owned range to become valid. |
| 1075 | * d. Advance the hardware prod pointer. |
| 1076 | * e. Tell the next owner we've finished. |
| 1077 | * |
| 1078 | * 5. If we are inserting a CMD_SYNC (we may or may not have been an |
| 1079 | * owner), then we need to stick around until it has completed: |
| 1080 | * a. If we have MSIs, the SMMU can write back into the CMD_SYNC |
| 1081 | * to clear the first 4 bytes. |
| 1082 | * b. Otherwise, we spin waiting for the hardware cons pointer to |
| 1083 | * advance past our command. |
| 1084 | * |
| 1085 | * The devil is in the details, particularly the use of locking for handling |
| 1086 | * SYNC completion and freeing up space in the queue before we think that it is |
| 1087 | * full. |
| 1088 | */ |
| 1089 | static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, |
| 1090 | u32 sprod, u32 eprod, bool set) |
| 1091 | { |
| 1092 | u32 swidx, sbidx, ewidx, ebidx; |
| 1093 | struct arm_smmu_ll_queue llq = { |
| 1094 | .max_n_shift = cmdq->q.llq.max_n_shift, |
| 1095 | .prod = sprod, |
| 1096 | }; |
| 1097 | |
| 1098 | ewidx = BIT_WORD(Q_IDX(&llq, eprod)); |
| 1099 | ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG; |
| 1100 | |
| 1101 | while (llq.prod != eprod) { |
| 1102 | unsigned long mask; |
| 1103 | atomic_long_t *ptr; |
| 1104 | u32 limit = BITS_PER_LONG; |
| 1105 | |
| 1106 | swidx = BIT_WORD(Q_IDX(&llq, llq.prod)); |
| 1107 | sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG; |
| 1108 | |
| 1109 | ptr = &cmdq->valid_map[swidx]; |
| 1110 | |
| 1111 | if ((swidx == ewidx) && (sbidx < ebidx)) |
| 1112 | limit = ebidx; |
| 1113 | |
| 1114 | mask = GENMASK(limit - 1, sbidx); |
| 1115 | |
| 1116 | /* |
| 1117 | * The valid bit is the inverse of the wrap bit. This means |
| 1118 | * that a zero-initialised queue is invalid and, after marking |
| 1119 | * all entries as valid, they become invalid again when we |
| 1120 | * wrap. |
| 1121 | */ |
| 1122 | if (set) { |
| 1123 | atomic_long_xor(mask, ptr); |
| 1124 | } else { /* Poll */ |
| 1125 | unsigned long valid; |
| 1126 | |
| 1127 | valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask; |
| 1128 | atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid); |
| 1129 | } |
| 1130 | |
| 1131 | llq.prod = queue_inc_prod_n(&llq, limit - sbidx); |
Robin Murphy | 2f657ad | 2017-08-31 14:44:25 +0100 | [diff] [blame] | 1132 | } |
| 1133 | } |
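/*
 * Editor's worked example for the wrap/valid trick above: take a 4-entry
 * queue, so the map is the low nibble of a single long. On the first pass
 * (wrap bit clear), (ULONG_MAX + 0) & mask == mask, so pollers expect
 * all-ones and the zero-initialised map reads as invalid until producers
 * XOR their masks in:
 *
 *	0b0000 -> XOR [0,2): 0b0011 -> XOR [2,4): 0b1111
 *
 * Once prod wraps (wrap bit set), (ULONG_MAX + 1) & mask == 0, so the
 * expected pattern inverts and the same XORs step the bits back down:
 *
 *	0b1111 -> 0b1100 -> 0b0000
 *
 * No CPU ever has to clear bits behind itself.
 */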
| 1134 | |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1135 | /* Mark all entries in the range [sprod, eprod) as valid */ |
| 1136 | static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, |
| 1137 | u32 sprod, u32 eprod) |
| 1138 | { |
| 1139 | __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); |
| 1140 | } |
| 1141 | |
| 1142 | /* Wait for all entries in the range [sprod, eprod) to become valid */ |
| 1143 | static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, |
| 1144 | u32 sprod, u32 eprod) |
| 1145 | { |
| 1146 | __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); |
| 1147 | } |
| 1148 | |
| 1149 | /* Wait for the command queue to become non-full */ |
| 1150 | static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, |
| 1151 | struct arm_smmu_ll_queue *llq) |
| 1152 | { |
| 1153 | unsigned long flags; |
| 1154 | struct arm_smmu_queue_poll qp; |
| 1155 | struct arm_smmu_cmdq *cmdq = &smmu->cmdq; |
| 1156 | int ret = 0; |
| 1157 | |
| 1158 | /* |
| 1159 | * Try to update our copy of cons by grabbing exclusive cmdq access. If |
| 1160 | * that fails, spin until somebody else updates it for us. |
| 1161 | */ |
| 1162 | if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { |
| 1163 | WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); |
| 1164 | arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); |
| 1165 | llq->val = READ_ONCE(cmdq->q.llq.val); |
| 1166 | return 0; |
| 1167 | } |
| 1168 | |
| 1169 | queue_poll_init(smmu, &qp); |
| 1170 | do { |
| 1171 | llq->val = READ_ONCE(smmu->cmdq.q.llq.val); |
| 1172 | if (!queue_full(llq)) |
| 1173 | break; |
| 1174 | |
| 1175 | ret = queue_poll(&qp); |
| 1176 | } while (!ret); |
| 1177 | |
| 1178 | return ret; |
| 1179 | } |
| 1180 | |
| 1181 | /* |
| 1182 | * Wait until the SMMU signals a CMD_SYNC completion MSI. |
| 1183 | * Must be called with the cmdq lock held in some capacity. |
| 1184 | */ |
| 1185 | static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, |
| 1186 | struct arm_smmu_ll_queue *llq) |
| 1187 | { |
| 1188 | int ret = 0; |
| 1189 | struct arm_smmu_queue_poll qp; |
| 1190 | struct arm_smmu_cmdq *cmdq = &smmu->cmdq; |
| 1191 | u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); |
| 1192 | |
| 1193 | queue_poll_init(smmu, &qp); |
| 1194 | |
| 1195 | /* |
| 1196 | * The MSI won't generate an event, since it's being written back |
| 1197 | * into the command queue. |
| 1198 | */ |
| 1199 | qp.wfe = false; |
| 1200 | smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp))); |
| 1201 | llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); |
| 1202 | return ret; |
| 1203 | } |
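/*
 * Editor's note: the poll above relies on the property described in the
 * insertion comment earlier -- the CMD_SYNC is built so that the SMMU
 * delivers its completion MSI into the command's own queue slot, zeroing
 * the first four bytes. That is why we take a u32 view of the entry and
 * wait for !VAL instead of watching the cons pointer.
 */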
| 1204 | |
| 1205 | /* |
| 1206 | * Wait until the SMMU cons index passes llq->prod. |
| 1207 | * Must be called with the cmdq lock held in some capacity. |
| 1208 | */ |
| 1209 | static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, |
| 1210 | struct arm_smmu_ll_queue *llq) |
| 1211 | { |
| 1212 | struct arm_smmu_queue_poll qp; |
| 1213 | struct arm_smmu_cmdq *cmdq = &smmu->cmdq; |
| 1214 | u32 prod = llq->prod; |
| 1215 | int ret = 0; |
| 1216 | |
| 1217 | queue_poll_init(smmu, &qp); |
| 1218 | llq->val = READ_ONCE(smmu->cmdq.q.llq.val); |
| 1219 | do { |
| 1220 | if (queue_consumed(llq, prod)) |
| 1221 | break; |
| 1222 | |
| 1223 | ret = queue_poll(&qp); |
| 1224 | |
| 1225 | /* |
| 1226 | * This needs to be a readl() so that our subsequent call |
| 1227 | * to arm_smmu_cmdq_shared_tryunlock() can fail accurately. |
| 1228 | * |
| 1229 | * Specifically, we need to ensure that we observe all |
| 1230 | * shared_lock()s by other CMD_SYNCs that share our owner, |
| 1231 | * so that a failing call to tryunlock() means that we're |
| 1232 | * the last one out and therefore we can safely advance |
| 1233 | * cmdq->q.llq.cons. Roughly speaking: |
| 1234 | * |
| 1235 | *    CPU 0                  CPU 1                  CPU 2 (us) |
| 1236 | * |
| 1237 | *    if (sync) |
| 1238 | *            shared_lock(); |
| 1239 | * |
| 1240 | *    dma_wmb(); |
| 1241 | *    set_valid_map(); |
| 1242 | * |
| 1243 | *                           if (owner) { |
| 1244 | *                                   poll_valid_map(); |
| 1245 | *                                   <control dependency> |
| 1246 | *                                   writel(prod_reg); |
| 1247 | * |
| 1248 | *                                                  readl(cons_reg); |
| 1249 | *                                                  tryunlock(); |
| 1250 | * |
| 1251 | * Requires us to see CPU 0's shared_lock() acquisition. |
| 1252 | */ |
| 1253 | llq->cons = readl(cmdq->q.cons_reg); |
| 1254 | } while (!ret); |
| 1255 | |
| 1256 | return ret; |
| 1257 | } |
| 1258 | |
| 1259 | static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, |
| 1260 | struct arm_smmu_ll_queue *llq) |
| 1261 | { |
| 1262 | if (smmu->features & ARM_SMMU_FEAT_MSI && |
| 1263 | smmu->features & ARM_SMMU_FEAT_COHERENCY) |
| 1264 | return __arm_smmu_cmdq_poll_until_msi(smmu, llq); |
| 1265 | |
| 1266 | return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); |
| 1267 | } |
| 1268 | |
| 1269 | static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, |
| 1270 | u32 prod, int n) |
| 1271 | { |
| 1272 | int i; |
| 1273 | struct arm_smmu_ll_queue llq = { |
| 1274 | .max_n_shift = cmdq->q.llq.max_n_shift, |
| 1275 | .prod = prod, |
| 1276 | }; |
| 1277 | |
| 1278 | for (i = 0; i < n; ++i) { |
| 1279 | u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS]; |
| 1280 | |
| 1281 | prod = queue_inc_prod_n(&llq, i); |
| 1282 | queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); |
| 1283 | } |
| 1284 | } |
| 1285 | |
Will Deacon | 05cbaf4 | 2019-08-20 13:25:36 +0100 | [diff] [blame] | 1286 | /* |
| 1287 | * This is the actual insertion function, and provides the following |
| 1288 | * ordering guarantees to callers: |
| 1289 | * |
| 1290 | * - There is a dma_wmb() before publishing any commands to the queue. |
| 1291 | * This can be relied upon to order prior writes to data structures |
| 1292 | * in memory (such as a CD or an STE) before the command. |
| 1293 | * |
| 1294 | * - On completion of a CMD_SYNC, there is a control dependency. |
| 1295 | * This can be relied upon to order subsequent writes to memory (e.g. |
| 1296 | * freeing an IOVA) after completion of the CMD_SYNC. |
| 1297 | * |
| 1298 | * - Command insertion is totally ordered, so if two CPUs each race to |
| 1299 | * insert their own list of commands then all of the commands from one |
| 1300 | * CPU will appear before any of the commands from the other CPU. |
| 1301 | */ |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1302 | static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, |
| 1303 | u64 *cmds, int n, bool sync) |
| 1304 | { |
| 1305 | u64 cmd_sync[CMDQ_ENT_DWORDS]; |
| 1306 | u32 prod; |
| 1307 | unsigned long flags; |
| 1308 | bool owner; |
| 1309 | struct arm_smmu_cmdq *cmdq = &smmu->cmdq; |
| 1310 | struct arm_smmu_ll_queue llq = { |
| 1311 | .max_n_shift = cmdq->q.llq.max_n_shift, |
| 1312 | }, head = llq; |
| 1313 | int ret = 0; |
| 1314 | |
| 1315 | /* 1. Allocate some space in the queue */ |
| 1316 | local_irq_save(flags); |
| 1317 | llq.val = READ_ONCE(cmdq->q.llq.val); |
| 1318 | do { |
| 1319 | u64 old; |
| 1320 | |
| 1321 | while (!queue_has_space(&llq, n + sync)) { |
| 1322 | local_irq_restore(flags); |
| 1323 | if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) |
| 1324 | dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); |
| 1325 | local_irq_save(flags); |
| 1326 | } |
| 1327 | |
| 1328 | head.cons = llq.cons; |
| 1329 | head.prod = queue_inc_prod_n(&llq, n + sync) | |
| 1330 | CMDQ_PROD_OWNED_FLAG; |
| 1331 | |
| 1332 | old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); |
| 1333 | if (old == llq.val) |
| 1334 | break; |
| 1335 | |
| 1336 | llq.val = old; |
| 1337 | } while (1); |
| 1338 | owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG); |
| 1339 | head.prod &= ~CMDQ_PROD_OWNED_FLAG; |
| 1340 | llq.prod &= ~CMDQ_PROD_OWNED_FLAG; |
| 1341 | |
| 1342 | /* |
| 1343 | * 2. Write our commands into the queue |
| 1344 | * Dependency ordering from the cmpxchg() loop above. |
| 1345 | */ |
| 1346 | arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); |
| 1347 | if (sync) { |
| 1348 | prod = queue_inc_prod_n(&llq, n); |
| 1349 | arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod); |
| 1350 | queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); |
| 1351 | |
| 1352 | /* |
| 1353 | * In order to determine completion of our CMD_SYNC, we must |
| 1354 | * ensure that the queue can't wrap twice without us noticing. |
| 1355 | * We achieve that by taking the cmdq lock as shared before |
| 1356 | * marking our slot as valid. |
| 1357 | */ |
| 1358 | arm_smmu_cmdq_shared_lock(cmdq); |
| 1359 | } |
| 1360 | |
| 1361 | /* 3. Mark our slots as valid, ensuring commands are visible first */ |
| 1362 | dma_wmb(); |
| 1363 | arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); |
| 1364 | |
| 1365 | /* 4. If we are the owner, take control of the SMMU hardware */ |
| 1366 | if (owner) { |
| 1367 | /* a. Wait for previous owner to finish */ |
| 1368 | atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); |
| 1369 | |
| 1370 | /* b. Stop gathering work by clearing the owned flag */ |
| 1371 | prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG, |
| 1372 | &cmdq->q.llq.atomic.prod); |
| 1373 | prod &= ~CMDQ_PROD_OWNED_FLAG; |
| 1374 | |
| 1375 | /* |
| 1376 | * c. Wait for any gathered work to be written to the queue. |
| 1377 | * Note that we read our own entries so that we have the control |
| 1378 | * dependency required by (d). |
| 1379 | */ |
| 1380 | arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); |
| 1381 | |
| 1382 | /* |
| 1383 | * d. Advance the hardware prod pointer |
| 1384 | * Control dependency ordering from the entries becoming valid. |
| 1385 | */ |
| 1386 | writel_relaxed(prod, cmdq->q.prod_reg); |
| 1387 | |
| 1388 | /* |
| 1389 | * e. Tell the next owner we're done |
| 1390 | * Make sure we've updated the hardware first, so that we don't |
| 1391 | * race to update prod and potentially move it backwards. |
| 1392 | */ |
| 1393 | atomic_set_release(&cmdq->owner_prod, prod); |
| 1394 | } |
| 1395 | |
| 1396 | /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */ |
| 1397 | if (sync) { |
| 1398 | llq.prod = queue_inc_prod_n(&llq, n); |
| 1399 | ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); |
| 1400 | if (ret) { |
| 1401 | dev_err_ratelimited(smmu->dev, |
| 1402 | "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n", |
| 1403 | llq.prod, |
| 1404 | readl_relaxed(cmdq->q.prod_reg), |
| 1405 | readl_relaxed(cmdq->q.cons_reg)); |
| 1406 | } |
| 1407 | |
| 1408 | /* |
| 1409 | * Try to unlock the cmdq lock. This will fail if we're the last |
| 1410 | * reader, in which case we can safely update cmdq->q.llq.cons. |
| 1411 | */ |
| 1412 | if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { |
| 1413 | WRITE_ONCE(cmdq->q.llq.cons, llq.cons); |
| 1414 | arm_smmu_cmdq_shared_unlock(cmdq); |
| 1415 | } |
| 1416 | } |
| 1417 | |
| 1418 | local_irq_restore(flags); |
| 1419 | return ret; |
| 1420 | } |
| 1421 | |
| 1422 | static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, |
| 1423 | struct arm_smmu_cmdq_ent *ent) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1424 | { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1425 | u64 cmd[CMDQ_ENT_DWORDS]; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1426 | |
| 1427 | if (arm_smmu_cmdq_build_cmd(cmd, ent)) { |
| 1428 | dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", |
| 1429 | ent->opcode); |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1430 | return -EINVAL; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1431 | } |
| 1432 | |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1433 | return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false); |
Will Deacon | 4980659 | 2017-10-19 16:41:53 +0100 | [diff] [blame] | 1434 | } |
| 1435 | |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 1436 | static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) |
Will Deacon | 4980659 | 2017-10-19 16:41:53 +0100 | [diff] [blame] | 1437 | { |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1438 | return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true); |
Robin Murphy | 2f657ad | 2017-08-31 14:44:25 +0100 | [diff] [blame] | 1439 | } |
| 1440 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1441 | /* Context descriptor manipulation functions */ |
| 1442 | static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr) |
| 1443 | { |
| 1444 | u64 val = 0; |
| 1445 | |
| 1446 | /* Repack the TCR. We only care about TTBR0 for now */ |
| 1447 | val |= ARM_SMMU_TCR2CD(tcr, T0SZ); |
| 1448 | val |= ARM_SMMU_TCR2CD(tcr, TG0); |
| 1449 | val |= ARM_SMMU_TCR2CD(tcr, IRGN0); |
| 1450 | val |= ARM_SMMU_TCR2CD(tcr, ORGN0); |
| 1451 | val |= ARM_SMMU_TCR2CD(tcr, SH0); |
| 1452 | val |= ARM_SMMU_TCR2CD(tcr, EPD0); |
| 1453 | val |= ARM_SMMU_TCR2CD(tcr, EPD1); |
| 1454 | val |= ARM_SMMU_TCR2CD(tcr, IPS); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1455 | |
| 1456 | return val; |
| 1457 | } |
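/*
 * Editor's note: ARM_SMMU_TCR2CD (defined earlier in this file) is
 * assumed to pull one named field out of the CPU-style TCR that
 * io-pgtable hands us and re-place it in the CD's TCR layout. The field
 * values carry across unchanged; e.g. a 48-bit input address range gives
 * T0SZ = 64 - 48 = 16 in both encodings.
 */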
| 1458 | |
| 1459 | static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu, |
| 1460 | struct arm_smmu_s1_cfg *cfg) |
| 1461 | { |
| 1462 | u64 val; |
| 1463 | |
| 1464 | /* |
| 1465 | * We don't need to issue any invalidation here, as we'll invalidate |
| 1466 | * the STE when installing the new entry anyway. |
| 1467 | */ |
| 1468 | val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) | |
| 1469 | #ifdef __BIG_ENDIAN |
| 1470 | CTXDESC_CD_0_ENDI | |
| 1471 | #endif |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1472 | CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET | |
| 1473 | CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1474 | CTXDESC_CD_0_V; |
Yisheng Xie | 9cff86fd2 | 2017-09-21 20:36:07 +0800 | [diff] [blame] | 1475 | |
| 1476 | /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */ |
| 1477 | if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE) |
| 1478 | val |= CTXDESC_CD_0_S; |
| 1479 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1480 | cfg->cdptr[0] = cpu_to_le64(val); |
| 1481 | |
Robin Murphy | 1cf9e54 | 2018-03-26 13:35:09 +0100 | [diff] [blame] | 1482 | val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1483 | cfg->cdptr[1] = cpu_to_le64(val); |
| 1484 | |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1485 | cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1486 | } |
| 1487 | |
| 1488 | /* Stream table manipulation functions */ |
| 1489 | static void |
| 1490 | arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc) |
| 1491 | { |
| 1492 | u64 val = 0; |
| 1493 | |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1494 | val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); |
Robin Murphy | 1cf9e54 | 2018-03-26 13:35:09 +0100 | [diff] [blame] | 1495 | val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1496 | |
| 1497 | *dst = cpu_to_le64(val); |
| 1498 | } |
| 1499 | |
| 1500 | static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) |
| 1501 | { |
| 1502 | struct arm_smmu_cmdq_ent cmd = { |
| 1503 | .opcode = CMDQ_OP_CFGI_STE, |
| 1504 | .cfgi = { |
| 1505 | .sid = sid, |
| 1506 | .leaf = true, |
| 1507 | }, |
| 1508 | }; |
| 1509 | |
| 1510 | arm_smmu_cmdq_issue_cmd(smmu, &cmd); |
Robin Murphy | 2f657ad | 2017-08-31 14:44:25 +0100 | [diff] [blame] | 1511 | arm_smmu_cmdq_issue_sync(smmu); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1512 | } |
| 1513 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1514 | static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, |
| 1515 | __le64 *dst) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1516 | { |
| 1517 | /* |
| 1518 | * This is hideously complicated, but we only really care about |
| 1519 | * three cases at the moment: |
| 1520 | * |
Will Deacon | beb3c6a | 2017-01-06 16:27:30 +0000 | [diff] [blame] | 1521 | * 1. Invalid (all zero) -> bypass/fault (init) |
| 1522 | * 2. Bypass/fault -> translation/bypass (attach) |
| 1523 | * 3. Translation/bypass -> bypass/fault (detach) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1524 | * |
| 1525 | * Given that we can't update the STE atomically and the SMMU |
| 1526 | * doesn't read the thing in a defined order, that leaves us |
| 1527 | * with the following maintenance requirements: |
| 1528 | * |
| 1529 | * 1. Update Config, return (init time STEs aren't live) |
| 1530 | * 2. Write everything apart from dword 0, sync, write dword 0, sync |
| 1531 | * 3. Update Config, sync |
| 1532 | */ |
| 1533 | u64 val = le64_to_cpu(dst[0]); |
| 1534 | bool ste_live = false; |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1535 | struct arm_smmu_device *smmu = NULL; |
| 1536 | struct arm_smmu_s1_cfg *s1_cfg = NULL; |
| 1537 | struct arm_smmu_s2_cfg *s2_cfg = NULL; |
| 1538 | struct arm_smmu_domain *smmu_domain = NULL; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1539 | struct arm_smmu_cmdq_ent prefetch_cmd = { |
| 1540 | .opcode = CMDQ_OP_PREFETCH_CFG, |
| 1541 | .prefetch = { |
| 1542 | .sid = sid, |
| 1543 | }, |
| 1544 | }; |
| 1545 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1546 | if (master) { |
| 1547 | smmu_domain = master->domain; |
| 1548 | smmu = master->smmu; |
| 1549 | } |
| 1550 | |
| 1551 | if (smmu_domain) { |
| 1552 | switch (smmu_domain->stage) { |
| 1553 | case ARM_SMMU_DOMAIN_S1: |
| 1554 | s1_cfg = &smmu_domain->s1_cfg; |
| 1555 | break; |
| 1556 | case ARM_SMMU_DOMAIN_S2: |
| 1557 | case ARM_SMMU_DOMAIN_NESTED: |
| 1558 | s2_cfg = &smmu_domain->s2_cfg; |
| 1559 | break; |
| 1560 | default: |
| 1561 | break; |
| 1562 | } |
| 1563 | } |
| 1564 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1565 | if (val & STRTAB_STE_0_V) { |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1566 | switch (FIELD_GET(STRTAB_STE_0_CFG, val)) { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1567 | case STRTAB_STE_0_CFG_BYPASS: |
| 1568 | break; |
| 1569 | case STRTAB_STE_0_CFG_S1_TRANS: |
| 1570 | case STRTAB_STE_0_CFG_S2_TRANS: |
| 1571 | ste_live = true; |
| 1572 | break; |
Will Deacon | 5bc0a11 | 2016-08-16 14:29:16 +0100 | [diff] [blame] | 1573 | case STRTAB_STE_0_CFG_ABORT: |
Anders Roxell | 11f4fe9 | 2019-07-30 17:20:11 +0200 | [diff] [blame] | 1574 | BUG_ON(!disable_bypass); |
| 1575 | break; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1576 | default: |
| 1577 | BUG(); /* STE corruption */ |
| 1578 | } |
| 1579 | } |
| 1580 | |
Nate Watterson | 810871c | 2016-12-20 23:11:48 -0500 | [diff] [blame] | 1581 | /* Nuke the existing STE_0 value, as we're going to rewrite it */ |
Will Deacon | beb3c6a | 2017-01-06 16:27:30 +0000 | [diff] [blame] | 1582 | val = STRTAB_STE_0_V; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1583 | |
Will Deacon | beb3c6a | 2017-01-06 16:27:30 +0000 | [diff] [blame] | 1584 | /* Bypass/fault */ |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1585 | if (!smmu_domain || !(s1_cfg || s2_cfg)) { |
| 1586 | if (!smmu_domain && disable_bypass) |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1587 | val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT); |
Will Deacon | beb3c6a | 2017-01-06 16:27:30 +0000 | [diff] [blame] | 1588 | else |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1589 | val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS); |
Will Deacon | beb3c6a | 2017-01-06 16:27:30 +0000 | [diff] [blame] | 1590 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1591 | dst[0] = cpu_to_le64(val); |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1592 | dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, |
| 1593 | STRTAB_STE_1_SHCFG_INCOMING)); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1594 | dst[2] = 0; /* Nuke the VMID */ |
Will Deacon | 704c038 | 2017-10-05 16:49:37 +0100 | [diff] [blame] | 1595 | /* |
| 1596 | * The SMMU can perform negative caching, so we must sync |
| 1597 | * the STE regardless of whether the old value was live. |
| 1598 | */ |
| 1599 | if (smmu) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1600 | arm_smmu_sync_ste_for_sid(smmu, sid); |
| 1601 | return; |
| 1602 | } |
| 1603 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1604 | if (s1_cfg) { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1605 | BUG_ON(ste_live); |
| 1606 | dst[1] = cpu_to_le64( |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1607 | FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | |
| 1608 | FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | |
| 1609 | FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1610 | FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1)); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1611 | |
Yisheng Xie | 9cff86fd2 | 2017-09-21 20:36:07 +0800 | [diff] [blame] | 1612 | if (smmu->features & ARM_SMMU_FEAT_STALLS && |
| 1613 | !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) |
Prem Mallappa | 6380be0 | 2015-12-14 22:01:23 +0530 | [diff] [blame] | 1614 | dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); |
| 1615 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1616 | val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) | |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1617 | FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1618 | } |
| 1619 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1620 | if (s2_cfg) { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1621 | BUG_ON(ste_live); |
| 1622 | dst[2] = cpu_to_le64( |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1623 | FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | |
| 1624 | FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1625 | #ifdef __BIG_ENDIAN |
| 1626 | STRTAB_STE_2_S2ENDI | |
| 1627 | #endif |
| 1628 | STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 | |
| 1629 | STRTAB_STE_2_S2R); |
| 1630 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1631 | dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1632 | |
Robin Murphy | ba08bdc | 2018-03-26 13:35:11 +0100 | [diff] [blame] | 1633 | val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1634 | } |
| 1635 | |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 1636 | if (master->ats_enabled) |
| 1637 | dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS, |
| 1638 | STRTAB_STE_1_EATS_TRANS)); |
| 1639 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1640 | arm_smmu_sync_ste_for_sid(smmu, sid); |
| 1641 | dst[0] = cpu_to_le64(val); |
| 1642 | arm_smmu_sync_ste_for_sid(smmu, sid); |
| 1643 | |
| 1644 | /* It's likely that we'll want to use the new STE soon */ |
Zhen Lei | 5e92946 | 2015-07-07 04:30:18 +0100 | [diff] [blame] | 1645 | if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) |
| 1646 | arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1647 | } |
| 1648 | |
| 1649 | static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) |
| 1650 | { |
| 1651 | unsigned int i; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1652 | |
| 1653 | for (i = 0; i < nent; ++i) { |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 1654 | arm_smmu_write_strtab_ent(NULL, -1, strtab); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1655 | strtab += STRTAB_STE_DWORDS; |
| 1656 | } |
| 1657 | } |
| 1658 | |
| 1659 | static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) |
| 1660 | { |
| 1661 | size_t size; |
| 1662 | void *strtab; |
| 1663 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; |
| 1664 | struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT]; |
| 1665 | |
| 1666 | if (desc->l2ptr) |
| 1667 | return 0; |
| 1668 | |
| 1669 | size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); |
Zhen Lei | 69146e7 | 2015-06-26 09:32:58 +0100 | [diff] [blame] | 1670 | strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1671 | |
| 1672 | desc->span = STRTAB_SPLIT + 1; |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 1673 | desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma, |
| 1674 | GFP_KERNEL | __GFP_ZERO); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1675 | if (!desc->l2ptr) { |
| 1676 | dev_err(smmu->dev, |
| 1677 | "failed to allocate l2 stream table for SID %u\n", |
| 1678 | sid); |
| 1679 | return -ENOMEM; |
| 1680 | } |
| 1681 | |
| 1682 | arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT); |
| 1683 | arm_smmu_write_strtab_l1_desc(strtab, desc); |
| 1684 | return 0; |
| 1685 | } |
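/*
 * Editor's worked example, assuming the driver's usual constants of
 * STRTAB_SPLIT == 8 and STRTAB_STE_DWORDS == 8: each level-2 table spans
 * 1 << 8 == 256 SIDs, and the allocation above is 1 << (8 + 3 + 3) ==
 * 16KiB, i.e. 256 STEs of 64 bytes each. SID 0x1234 then selects L1
 * descriptor 0x12 and slot 0x34 within that L2 table.
 */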
| 1686 | |
| 1687 | /* IRQ and event handlers */ |
| 1688 | static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) |
| 1689 | { |
| 1690 | int i; |
| 1691 | struct arm_smmu_device *smmu = dev; |
| 1692 | struct arm_smmu_queue *q = &smmu->evtq.q; |
Will Deacon | 7c288a5 | 2019-07-02 17:16:16 +0100 | [diff] [blame] | 1693 | struct arm_smmu_ll_queue *llq = &q->llq; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1694 | u64 evt[EVTQ_ENT_DWORDS]; |
| 1695 | |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1696 | do { |
| 1697 | while (!queue_remove_raw(q, evt)) { |
Robin Murphy | 7417b99 | 2018-03-26 13:35:12 +0100 | [diff] [blame] | 1698 | u8 id = FIELD_GET(EVTQ_0_ID, evt[0]); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1699 | |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1700 | dev_info(smmu->dev, "event 0x%02x received:\n", id); |
| 1701 | for (i = 0; i < ARRAY_SIZE(evt); ++i) |
| 1702 | dev_info(smmu->dev, "\t0x%016llx\n", |
| 1703 | (unsigned long long)evt[i]); |
| 1705 | } |
| 1706 | |
| 1707 | /* |
| 1708 | * Not much we can do on overflow, so scream and pretend we're |
| 1709 | * trying harder. |
| 1710 | */ |
Will Deacon | 2a8868f | 2019-07-02 17:12:24 +0100 | [diff] [blame] | 1711 | if (queue_sync_prod_in(q) == -EOVERFLOW) |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1712 | dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); |
Will Deacon | 7c288a5 | 2019-07-02 17:16:16 +0100 | [diff] [blame] | 1713 | } while (!queue_empty(llq)); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1714 | |
| 1715 | /* Sync our overflow flag, as we believe we're up to speed */ |
Will Deacon | 7c288a5 | 2019-07-02 17:16:16 +0100 | [diff] [blame] | 1716 | llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | |
| 1717 | Q_IDX(llq, llq->cons); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1718 | return IRQ_HANDLED; |
| 1719 | } |
| 1720 | |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1721 | static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1722 | { |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1723 | u32 sid, ssid; |
| 1724 | u16 grpid; |
| 1725 | bool ssv, last; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1726 | |
Robin Murphy | 7417b99 | 2018-03-26 13:35:12 +0100 | [diff] [blame] | 1727 | sid = FIELD_GET(PRIQ_0_SID, evt[0]); |
| 1728 | ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]); |
| 1729 | ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0; |
| 1730 | last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]); |
| 1731 | grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1732 | |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1733 | dev_info(smmu->dev, "unexpected PRI request received:\n"); |
| 1734 | dev_info(smmu->dev, |
| 1735 | "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n", |
| 1736 | sid, ssid, grpid, last ? "L" : "", |
| 1737 | evt[0] & PRIQ_0_PERM_PRIV ? "" : "un", |
| 1738 | evt[0] & PRIQ_0_PERM_READ ? "R" : "", |
| 1739 | evt[0] & PRIQ_0_PERM_WRITE ? "W" : "", |
| 1740 | evt[0] & PRIQ_0_PERM_EXEC ? "X" : "", |
Robin Murphy | 1cf9e54 | 2018-03-26 13:35:09 +0100 | [diff] [blame] | 1741 | evt[1] & PRIQ_1_ADDR_MASK); |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1742 | |
| 1743 | if (last) { |
| 1744 | struct arm_smmu_cmdq_ent cmd = { |
| 1745 | .opcode = CMDQ_OP_PRI_RESP, |
| 1746 | .substream_valid = ssv, |
| 1747 | .pri = { |
| 1748 | .sid = sid, |
| 1749 | .ssid = ssid, |
| 1750 | .grpid = grpid, |
| 1751 | .resp = PRI_RESP_DENY, |
| 1752 | }, |
| 1753 | }; |
| 1754 | |
| 1755 | arm_smmu_cmdq_issue_cmd(smmu, &cmd); |
| 1756 | } |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1757 | } |
| 1758 | |
| 1759 | static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) |
| 1760 | { |
| 1761 | struct arm_smmu_device *smmu = dev; |
| 1762 | struct arm_smmu_queue *q = &smmu->priq.q; |
Will Deacon | 7c288a5 | 2019-07-02 17:16:16 +0100 | [diff] [blame] | 1763 | struct arm_smmu_ll_queue *llq = &q->llq; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1764 | u64 evt[PRIQ_ENT_DWORDS]; |
| 1765 | |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1766 | do { |
| 1767 | while (!queue_remove_raw(q, evt)) |
| 1768 | arm_smmu_handle_ppr(smmu, evt); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1769 | |
Will Deacon | 2a8868f | 2019-07-02 17:12:24 +0100 | [diff] [blame] | 1770 | if (queue_sync_prod_in(q) == -EOVERFLOW) |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1771 | dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); |
Will Deacon | 7c288a5 | 2019-07-02 17:16:16 +0100 | [diff] [blame] | 1772 | } while (!queue_empty(llq)); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1773 | |
| 1774 | /* Sync our overflow flag, as we believe we're up to speed */ |
Will Deacon | 7c288a5 | 2019-07-02 17:16:16 +0100 | [diff] [blame] | 1775 | llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | |
| 1776 | Q_IDX(llq, llq->cons); |
| 1777 | queue_sync_cons_out(q); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1778 | return IRQ_HANDLED; |
| 1779 | } |
| 1780 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1781 | static int arm_smmu_device_disable(struct arm_smmu_device *smmu); |
| 1782 | |
| 1783 | static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) |
| 1784 | { |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1785 | u32 gerror, gerrorn, active; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1786 | struct arm_smmu_device *smmu = dev; |
| 1787 | |
| 1788 | gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR); |
| 1789 | gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); |
| 1790 | |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1791 | active = gerror ^ gerrorn; |
| 1792 | if (!(active & GERROR_ERR_MASK)) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1793 | return IRQ_NONE; /* No errors pending */ |
| 1794 | |
| 1795 | dev_warn(smmu->dev, |
| 1796 | "unexpected global error reported (0x%08x), this could be serious\n", |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1797 | active); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1798 | |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1799 | if (active & GERROR_SFM_ERR) { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1800 | dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); |
| 1801 | arm_smmu_device_disable(smmu); |
| 1802 | } |
| 1803 | |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1804 | if (active & GERROR_MSI_GERROR_ABT_ERR) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1805 | dev_warn(smmu->dev, "GERROR MSI write aborted\n"); |
| 1806 | |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1807 | if (active & GERROR_MSI_PRIQ_ABT_ERR) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1808 | dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1809 | |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 1810 | if (active & GERROR_MSI_EVTQ_ABT_ERR) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1811 | dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1812 | |
Robin Murphy | dce032a | 2017-08-31 14:44:26 +0100 | [diff] [blame] | 1813 | if (active & GERROR_MSI_CMDQ_ABT_ERR) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1814 | dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1815 | |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1816 | if (active & GERROR_PRIQ_ABT_ERR) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1817 | dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); |
| 1818 | |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1819 | if (active & GERROR_EVTQ_ABT_ERR) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1820 | dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); |
| 1821 | |
Prem Mallappa | 324ba10 | 2015-12-14 22:01:14 +0530 | [diff] [blame] | 1822 | if (active & GERROR_CMDQ_ERR) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1823 | arm_smmu_cmdq_skip_err(smmu); |
| 1824 | |
| 1825 | writel(gerror, smmu->base + ARM_SMMU_GERRORN); |
| 1826 | return IRQ_HANDLED; |
| 1827 | } |
| 1828 | |
Geetha Sowjanya | f935448 | 2017-06-23 19:04:36 +0530 | [diff] [blame] | 1829 | static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev) |
| 1830 | { |
| 1831 | struct arm_smmu_device *smmu = dev; |
| 1832 | |
| 1833 | arm_smmu_evtq_thread(irq, dev); |
| 1834 | if (smmu->features & ARM_SMMU_FEAT_PRI) |
| 1835 | arm_smmu_priq_thread(irq, dev); |
| 1836 | |
| 1837 | return IRQ_HANDLED; |
| 1838 | } |
| 1839 | |
| 1840 | static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev) |
| 1841 | { |
| 1842 | arm_smmu_gerror_handler(irq, dev); |
Geetha Sowjanya | f935448 | 2017-06-23 19:04:36 +0530 | [diff] [blame] | 1843 | return IRQ_WAKE_THREAD; |
| 1844 | } |
| 1845 | |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 1846 | static void |
| 1847 | arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, |
| 1848 | struct arm_smmu_cmdq_ent *cmd) |
| 1849 | { |
| 1850 | size_t log2_span; |
| 1851 | size_t span_mask; |
| 1852 | /* ATC invalidates are always on 4096-byte pages */ |
| 1853 | size_t inval_grain_shift = 12; |
| 1854 | unsigned long page_start, page_end; |
| 1855 | |
| 1856 | *cmd = (struct arm_smmu_cmdq_ent) { |
| 1857 | .opcode = CMDQ_OP_ATC_INV, |
| 1858 | .substream_valid = !!ssid, |
| 1859 | .atc.ssid = ssid, |
| 1860 | }; |
| 1861 | |
| 1862 | if (!size) { |
| 1863 | cmd->atc.size = ATC_INV_SIZE_ALL; |
| 1864 | return; |
| 1865 | } |
| 1866 | |
| 1867 | page_start = iova >> inval_grain_shift; |
| 1868 | page_end = (iova + size - 1) >> inval_grain_shift; |
| 1869 | |
| 1870 | /* |
| 1871 | * In an ATS Invalidate Request, the address must be aligned on the |
| 1872 | * range size, which must be a power of two number of page sizes. We |
| 1873 | * thus have to choose between grossly over-invalidating the region, or |
| 1874 | * splitting the invalidation into multiple commands. For simplicity |
| 1875 | * we'll go with the first solution, but should refine it in the future |
| 1876 | * if multiple commands are shown to be more efficient. |
| 1877 | * |
| 1878 | * Find the smallest power of two that covers the range. The most |
| 1879 | * significant differing bit between the start and end addresses, |
| 1880 | * fls(start ^ end), indicates the required span. For example: |
| 1881 | * |
| 1882 | * We want to invalidate pages [8; 11]. This is already the ideal range: |
| 1883 | * x = 0b1000 ^ 0b1011 = 0b11 |
| 1884 | * span = 1 << fls(x) = 4 |
| 1885 | * |
| 1886 | * To invalidate pages [7; 10], we need to invalidate [0; 15]: |
| 1887 | * x = 0b0111 ^ 0b1010 = 0b1101 |
| 1888 | * span = 1 << fls(x) = 16 |
| 1889 | */ |
| 1890 | log2_span = fls_long(page_start ^ page_end); |
| 1891 | span_mask = (1ULL << log2_span) - 1; |
| 1892 | |
| 1893 | page_start &= ~span_mask; |
| 1894 | |
| 1895 | cmd->atc.addr = page_start << inval_grain_shift; |
| 1896 | cmd->atc.size = log2_span; |
| 1897 | } |
| 1898 | |
| 1899 | static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, |
| 1900 | struct arm_smmu_cmdq_ent *cmd) |
| 1901 | { |
| 1902 | int i; |
| 1903 | |
| 1904 | if (!master->ats_enabled) |
| 1905 | return 0; |
| 1906 | |
| 1907 | for (i = 0; i < master->num_sids; i++) { |
| 1908 | cmd->atc.sid = master->sids[i]; |
| 1909 | arm_smmu_cmdq_issue_cmd(master->smmu, cmd); |
| 1910 | } |
| 1911 | |
| 1912 | return arm_smmu_cmdq_issue_sync(master->smmu); |
| 1913 | } |
| 1914 | |
| 1915 | static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, |
| 1916 | int ssid, unsigned long iova, size_t size) |
| 1917 | { |
| 1918 | int ret = 0; |
| 1919 | unsigned long flags; |
| 1920 | struct arm_smmu_cmdq_ent cmd; |
| 1921 | struct arm_smmu_master *master; |
| 1922 | |
| 1923 | if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) |
| 1924 | return 0; |
| 1925 | |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 1926 | /* |
| 1927 | * Ensure that we've completed prior invalidation of the main TLBs |
| 1928 | * before we read 'nr_ats_masters' in case of a concurrent call to |
| 1929 | * arm_smmu_enable_ats(): |
| 1930 | * |
| 1931 | *    // unmap()                      // arm_smmu_enable_ats() |
| 1932 | *    TLBI+SYNC                       atomic_inc(&nr_ats_masters); |
| 1933 | *    smp_mb();                       [...] |
| 1934 | *    atomic_read(&nr_ats_masters);   pci_enable_ats() // writel() |
| 1935 | * |
| 1936 | * Ensures that we always see the incremented 'nr_ats_masters' count if |
| 1937 | * ATS was enabled at the PCI device before completion of the TLBI. |
| 1938 | */ |
| 1939 | smp_mb(); |
| 1940 | if (!atomic_read(&smmu_domain->nr_ats_masters)) |
| 1941 | return 0; |
| 1942 | |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 1943 | arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); |
| 1944 | |
| 1945 | spin_lock_irqsave(&smmu_domain->devices_lock, flags); |
| 1946 | list_for_each_entry(master, &smmu_domain->devices, domain_head) |
| 1947 | ret |= arm_smmu_atc_inv_master(master, &cmd); |
| 1948 | spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); |
| 1949 | |
| 1950 | return ret ? -ETIMEDOUT : 0; |
| 1951 | } |
| 1952 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1953 | /* IO_PGTABLE API */ |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1954 | static void arm_smmu_tlb_inv_context(void *cookie) |
| 1955 | { |
| 1956 | struct arm_smmu_domain *smmu_domain = cookie; |
| 1957 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 1958 | struct arm_smmu_cmdq_ent cmd; |
| 1959 | |
| 1960 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { |
| 1961 | cmd.opcode = CMDQ_OP_TLBI_NH_ASID; |
| 1962 | cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; |
| 1963 | cmd.tlbi.vmid = 0; |
| 1964 | } else { |
| 1965 | cmd.opcode = CMDQ_OP_TLBI_S12_VMALL; |
| 1966 | cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; |
| 1967 | } |
| 1968 | |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 1969 | /* |
| 1970 | * NOTE: when io-pgtable is in non-strict mode, we may get here with |
| 1971 | * PTEs previously cleared by unmaps on the current CPU not yet visible |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 1972 | * to the SMMU. We are relying on the dma_wmb() implicit in command |
| 1973 | * insertion to guarantee those are observed before the TLBI. Do be |
| 1974 | * careful, 007. |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 1975 | */ |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1976 | arm_smmu_cmdq_issue_cmd(smmu, &cmd); |
Andrew Murray | 5e73107 | 2018-10-10 11:29:27 +0100 | [diff] [blame] | 1977 | arm_smmu_cmdq_issue_sync(smmu); |
Will Deacon | 353e3cf | 2019-08-20 15:12:12 +0100 | [diff] [blame] | 1978 | arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1979 | } |
| 1980 | |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 1981 | static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, |
| 1982 | size_t granule, bool leaf, |
| 1983 | struct arm_smmu_domain *smmu_domain) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1984 | { |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 1985 | u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS]; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1986 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
Will Deacon | 353e3cf | 2019-08-20 15:12:12 +0100 | [diff] [blame] | 1987 | unsigned long start = iova, end = iova + size; |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 1988 | int i = 0; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1989 | struct arm_smmu_cmdq_ent cmd = { |
| 1990 | .tlbi = { |
| 1991 | .leaf = leaf, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1992 | }, |
| 1993 | }; |
| 1994 | |
Will Deacon | 7314ca8 | 2019-08-21 12:38:15 +0100 | [diff] [blame] | 1995 | if (!size) |
| 1996 | return; |
| 1997 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 1998 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { |
| 1999 | cmd.opcode = CMDQ_OP_TLBI_NH_VA; |
| 2000 | cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; |
| 2001 | } else { |
| 2002 | cmd.opcode = CMDQ_OP_TLBI_S2_IPA; |
| 2003 | cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; |
| 2004 | } |
| 2005 | |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 2006 | while (iova < end) { |
| 2007 | if (i == CMDQ_BATCH_ENTRIES) { |
| 2008 | arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false); |
| 2009 | i = 0; |
| 2010 | } |
| 2011 | |
| 2012 | cmd.tlbi.addr = iova; |
| 2013 | arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd); |
| 2014 | iova += granule; |
| 2015 | i++; |
| 2016 | } |
| 2017 | |
| 2018 | arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true); |
Will Deacon | 353e3cf | 2019-08-20 15:12:12 +0100 | [diff] [blame] | 2019 | |
| 2020 | /* |
| 2021 | * Unfortunately, this can't be leaf-only since we may have |
| 2022 | * zapped an entire table. |
| 2023 | */ |
| 2024 | arm_smmu_atc_inv_domain(smmu_domain, 0, start, size); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2025 | } |
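/*
 * Editor's worked example, assuming CMDQ_BATCH_ENTRIES is BITS_PER_LONG
 * (64 on arm64): invalidating 2MiB at a 4KiB granule generates 512 TLBI
 * commands. The loop above issues seven full batches of 64 without a
 * sync, and the final 64 commands go out with the trailing CMD_SYNC.
 */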
| 2026 | |
Will Deacon | 3951c41 | 2019-07-02 16:45:15 +0100 | [diff] [blame] | 2027 | static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather, |
| 2028 | unsigned long iova, size_t granule, |
Will Deacon | abfd6fe | 2019-07-02 16:44:41 +0100 | [diff] [blame] | 2029 | void *cookie) |
| 2030 | { |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 2031 | struct arm_smmu_domain *smmu_domain = cookie; |
| 2032 | struct iommu_domain *domain = &smmu_domain->domain; |
| 2033 | |
| 2034 | iommu_iotlb_gather_add_page(domain, gather, iova, granule); |
Will Deacon | abfd6fe | 2019-07-02 16:44:41 +0100 | [diff] [blame] | 2035 | } |
| 2036 | |
Will Deacon | 05aed94 | 2019-07-02 16:44:25 +0100 | [diff] [blame] | 2037 | static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, |
| 2038 | size_t granule, void *cookie) |
| 2039 | { |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 2040 | arm_smmu_tlb_inv_range(iova, size, granule, false, cookie); |
Will Deacon | 05aed94 | 2019-07-02 16:44:25 +0100 | [diff] [blame] | 2041 | } |
| 2042 | |
| 2043 | static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, |
| 2044 | size_t granule, void *cookie) |
| 2045 | { |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 2046 | arm_smmu_tlb_inv_range(iova, size, granule, true, cookie); |
Will Deacon | 05aed94 | 2019-07-02 16:44:25 +0100 | [diff] [blame] | 2047 | } |
| 2048 | |
Will Deacon | 298f7889 | 2019-07-02 16:43:34 +0100 | [diff] [blame] | 2049 | static const struct iommu_flush_ops arm_smmu_flush_ops = { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2050 | .tlb_flush_all = arm_smmu_tlb_inv_context, |
Will Deacon | 05aed94 | 2019-07-02 16:44:25 +0100 | [diff] [blame] | 2051 | .tlb_flush_walk = arm_smmu_tlb_inv_walk, |
| 2052 | .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, |
Will Deacon | abfd6fe | 2019-07-02 16:44:41 +0100 | [diff] [blame] | 2053 | .tlb_add_page = arm_smmu_tlb_inv_page_nosync, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2054 | }; |
| 2055 | |
| 2056 | /* IOMMU API */ |
| 2057 | static bool arm_smmu_capable(enum iommu_cap cap) |
| 2058 | { |
| 2059 | switch (cap) { |
| 2060 | case IOMMU_CAP_CACHE_COHERENCY: |
| 2061 | return true; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2062 | case IOMMU_CAP_NOEXEC: |
| 2063 | return true; |
| 2064 | default: |
| 2065 | return false; |
| 2066 | } |
| 2067 | } |
| 2068 | |
| 2069 | static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) |
| 2070 | { |
| 2071 | struct arm_smmu_domain *smmu_domain; |
| 2072 | |
Will Deacon | beb3c6a | 2017-01-06 16:27:30 +0000 | [diff] [blame] | 2073 | if (type != IOMMU_DOMAIN_UNMANAGED && |
| 2074 | type != IOMMU_DOMAIN_DMA && |
| 2075 | type != IOMMU_DOMAIN_IDENTITY) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2076 | return NULL; |
| 2077 | |
| 2078 | /* |
| 2079 | * Allocate the domain and initialise some of its data structures. |
| 2080 | * We can't really do anything meaningful until we've added a |
| 2081 | * master. |
| 2082 | */ |
| 2083 | smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); |
| 2084 | if (!smmu_domain) |
| 2085 | return NULL; |
| 2086 | |
Robin Murphy | 9adb959 | 2016-01-26 18:06:36 +0000 | [diff] [blame] | 2087 | if (type == IOMMU_DOMAIN_DMA && |
| 2088 | iommu_get_dma_cookie(&smmu_domain->domain)) { |
| 2089 | kfree(smmu_domain); |
| 2090 | return NULL; |
| 2091 | } |
| 2092 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2093 | mutex_init(&smmu_domain->init_mutex); |
Jean-Philippe Brucker | 2a7e62f | 2019-04-17 19:24:46 +0100 | [diff] [blame] | 2094 | INIT_LIST_HEAD(&smmu_domain->devices); |
| 2095 | spin_lock_init(&smmu_domain->devices_lock); |
| 2096 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2097 | return &smmu_domain->domain; |
| 2098 | } |
| 2099 | |
| 2100 | static int arm_smmu_bitmap_alloc(unsigned long *map, int span) |
| 2101 | { |
| 2102 | int idx, size = 1 << span; |
| 2103 | |
| 2104 | do { |
| 2105 | idx = find_first_zero_bit(map, size); |
| 2106 | if (idx == size) |
| 2107 | return -ENOSPC; |
| 2108 | } while (test_and_set_bit(idx, map)); |
| 2109 | |
| 2110 | return idx; |
| 2111 | } |
| 2112 | |
| 2113 | static void arm_smmu_bitmap_free(unsigned long *map, int idx) |
| 2114 | { |
| 2115 | clear_bit(idx, map); |
| 2116 | } |
| 2117 | |
| 2118 | static void arm_smmu_domain_free(struct iommu_domain *domain) |
| 2119 | { |
| 2120 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 2121 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 2122 | |
Robin Murphy | 9adb959 | 2016-01-26 18:06:36 +0000 | [diff] [blame] | 2123 | iommu_put_dma_cookie(domain); |
Markus Elfring | a6e08fb | 2015-06-29 17:47:43 +0100 | [diff] [blame] | 2124 | free_io_pgtable_ops(smmu_domain->pgtbl_ops); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2125 | |
| 2126 | /* Free the CD and ASID, if we allocated them */ |
| 2127 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { |
| 2128 | struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; |
| 2129 | |
| 2130 | if (cfg->cdptr) { |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2131 | dmam_free_coherent(smmu_domain->smmu->dev, |
| 2132 | CTXDESC_CD_DWORDS << 3, |
| 2133 | cfg->cdptr, |
| 2134 | cfg->cdptr_dma); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2135 | |
| 2136 | arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid); |
| 2137 | } |
| 2138 | } else { |
| 2139 | struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; |
| 2140 | if (cfg->vmid) |
| 2141 | arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid); |
| 2142 | } |
| 2143 | |
| 2144 | kfree(smmu_domain); |
| 2145 | } |
| 2146 | |
| 2147 | static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, |
| 2148 | struct io_pgtable_cfg *pgtbl_cfg) |
| 2149 | { |
| 2150 | int ret; |
Will Deacon | c0733a2 | 2015-10-13 17:51:14 +0100 | [diff] [blame] | 2151 | int asid; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2152 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 2153 | struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; |
| 2154 | |
| 2155 | asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); |
Arnd Bergmann | 287980e | 2016-05-27 23:23:25 +0200 | [diff] [blame] | 2156 | if (asid < 0) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2157 | return asid; |
| 2158 | |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2159 | cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, |
| 2160 | &cfg->cdptr_dma, |
| 2161 | GFP_KERNEL | __GFP_ZERO); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2162 | if (!cfg->cdptr) { |
| 2163 | dev_warn(smmu->dev, "failed to allocate context descriptor\n"); |
Will Deacon | c0733a2 | 2015-10-13 17:51:14 +0100 | [diff] [blame] | 2164 | ret = -ENOMEM; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2165 | goto out_free_asid; |
| 2166 | } |
| 2167 | |
Will Deacon | c0733a2 | 2015-10-13 17:51:14 +0100 | [diff] [blame] | 2168 | cfg->cd.asid = (u16)asid; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2169 | cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; |
| 2170 | cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr; |
Robin Murphy | 205577a | 2019-10-25 19:08:36 +0100 | [diff] [blame] | 2171 | cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2172 | return 0; |
| 2173 | |
| 2174 | out_free_asid: |
| 2175 | arm_smmu_bitmap_free(smmu->asid_map, asid); |
| 2176 | return ret; |
| 2177 | } |
| 2178 | |
| 2179 | static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, |
| 2180 | struct io_pgtable_cfg *pgtbl_cfg) |
| 2181 | { |
Will Deacon | c0733a2 | 2015-10-13 17:51:14 +0100 | [diff] [blame] | 2182 | int vmid; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2183 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 2184 | struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; |
| 2185 | |
| 2186 | vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); |
Arnd Bergmann | 287980e | 2016-05-27 23:23:25 +0200 | [diff] [blame] | 2187 | if (vmid < 0) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2188 | return vmid; |
| 2189 | |
Will Deacon | c0733a2 | 2015-10-13 17:51:14 +0100 | [diff] [blame] | 2190 | cfg->vmid = (u16)vmid; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2191 | cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; |
| 2192 | cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; |
| 2193 | return 0; |
| 2194 | } |
| 2195 | |
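/*
 * Pick a translation stage for the domain: identity domains short-circuit
 * to bypass, otherwise the requested stage is clamped to what the SMMU
 * actually implements before the io-pgtable is allocated and the
 * stage-specific finaliser runs.
 */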
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2196 | static int arm_smmu_domain_finalise(struct iommu_domain *domain) |
| 2197 | { |
| 2198 | int ret; |
| 2199 | unsigned long ias, oas; |
| 2200 | enum io_pgtable_fmt fmt; |
| 2201 | struct io_pgtable_cfg pgtbl_cfg; |
| 2202 | struct io_pgtable_ops *pgtbl_ops; |
| 2203 | int (*finalise_stage_fn)(struct arm_smmu_domain *, |
| 2204 | struct io_pgtable_cfg *); |
| 2205 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 2206 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 2207 | |
Will Deacon | beb3c6a | 2017-01-06 16:27:30 +0000 | [diff] [blame] | 2208 | if (domain->type == IOMMU_DOMAIN_IDENTITY) { |
| 2209 | smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; |
| 2210 | return 0; |
| 2211 | } |
| 2212 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2213 | /* Restrict the stage to what we can actually support */ |
| 2214 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) |
| 2215 | smmu_domain->stage = ARM_SMMU_DOMAIN_S2; |
| 2216 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) |
| 2217 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; |
| 2218 | |
| 2219 | switch (smmu_domain->stage) { |
| 2220 | case ARM_SMMU_DOMAIN_S1: |
Robin Murphy | dcd189e | 2018-03-26 13:35:15 +0100 | [diff] [blame] | 2221 | ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; |
| 2222 | ias = min_t(unsigned long, ias, VA_BITS); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2223 | oas = smmu->ias; |
| 2224 | fmt = ARM_64_LPAE_S1; |
| 2225 | finalise_stage_fn = arm_smmu_domain_finalise_s1; |
| 2226 | break; |
| 2227 | case ARM_SMMU_DOMAIN_NESTED: |
| 2228 | case ARM_SMMU_DOMAIN_S2: |
| 2229 | ias = smmu->ias; |
| 2230 | oas = smmu->oas; |
| 2231 | fmt = ARM_64_LPAE_S2; |
| 2232 | finalise_stage_fn = arm_smmu_domain_finalise_s2; |
| 2233 | break; |
| 2234 | default: |
| 2235 | return -EINVAL; |
| 2236 | } |
| 2237 | |
| 2238 | pgtbl_cfg = (struct io_pgtable_cfg) { |
Robin Murphy | d546635 | 2016-05-09 17:20:09 +0100 | [diff] [blame] | 2239 | .pgsize_bitmap = smmu->pgsize_bitmap, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2240 | .ias = ias, |
| 2241 | .oas = oas, |
Will Deacon | 4f41845 | 2019-06-25 12:51:25 +0100 | [diff] [blame] | 2242 | .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, |
Will Deacon | 298f7889 | 2019-07-02 16:43:34 +0100 | [diff] [blame] | 2243 | .tlb = &arm_smmu_flush_ops, |
Robin Murphy | bdc6d97 | 2015-07-29 19:46:07 +0100 | [diff] [blame] | 2244 | .iommu_dev = smmu->dev, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2245 | }; |
| 2246 | |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 2247 | if (smmu_domain->non_strict) |
| 2248 | pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; |
| 2249 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2250 | pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); |
| 2251 | if (!pgtbl_ops) |
| 2252 | return -ENOMEM; |
| 2253 | |
Robin Murphy | d546635 | 2016-05-09 17:20:09 +0100 | [diff] [blame] | 2254 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; |
Robin Murphy | 6619c91 | 2018-03-26 13:35:14 +0100 | [diff] [blame] | 2255 | domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; |
Robin Murphy | 455eb7d | 2016-09-12 17:13:58 +0100 | [diff] [blame] | 2256 | domain->geometry.force_aperture = true; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2257 | |
| 2258 | ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); |
Jean-Philippe Brucker | 57d72e1 | 2017-12-14 11:03:01 +0000 | [diff] [blame] | 2259 | if (ret < 0) { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2260 | free_io_pgtable_ops(pgtbl_ops); |
Jean-Philippe Brucker | 57d72e1 | 2017-12-14 11:03:01 +0000 | [diff] [blame] | 2261 | return ret; |
| 2262 | } |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2263 | |
Jean-Philippe Brucker | 57d72e1 | 2017-12-14 11:03:01 +0000 | [diff] [blame] | 2264 | smmu_domain->pgtbl_ops = pgtbl_ops; |
| 2265 | return 0; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2266 | } |
| 2267 | |
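/*
 * Worked example for the two-level walk below (illustrative only, assuming
 * the driver's STRTAB_SPLIT of 8): SID 0x1234 selects L1 descriptor 0x12,
 * then STE 0x34 within that descriptor's L2 table. The returned pointer is
 * the first doubleword of the STE.
 */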
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2268 | static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) |
| 2269 | { |
| 2270 | __le64 *step; |
| 2271 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; |
| 2272 | |
| 2273 | if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { |
| 2274 | struct arm_smmu_strtab_l1_desc *l1_desc; |
| 2275 | int idx; |
| 2276 | |
| 2277 | /* Two-level walk */ |
| 2278 | idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS; |
| 2279 | l1_desc = &cfg->l1_desc[idx]; |
| 2280 | idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS; |
| 2281 | step = &l1_desc->l2ptr[idx]; |
| 2282 | } else { |
| 2283 | /* Simple linear lookup */ |
| 2284 | step = &cfg->strtab[sid * STRTAB_STE_DWORDS]; |
| 2285 | } |
| 2286 | |
| 2287 | return step; |
| 2288 | } |
| 2289 | |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2290 | static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2291 | { |
Robin Murphy | 563b5cb | 2018-01-02 12:33:14 +0000 | [diff] [blame] | 2292 | int i, j; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2293 | struct arm_smmu_device *smmu = master->smmu; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2294 | |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2295 | for (i = 0; i < master->num_sids; ++i) { |
| 2296 | u32 sid = master->sids[i]; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2297 | __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); |
| 2298 | |
Robin Murphy | 563b5cb | 2018-01-02 12:33:14 +0000 | [diff] [blame] | 2299 | /* Bridged PCI devices may end up with duplicated IDs */ |
| 2300 | for (j = 0; j < i; j++) |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2301 | if (master->sids[j] == sid) |
Robin Murphy | 563b5cb | 2018-01-02 12:33:14 +0000 | [diff] [blame] | 2302 | break; |
| 2303 | if (j < i) |
| 2304 | continue; |
| 2305 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 2306 | arm_smmu_write_strtab_ent(master, sid, step); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2307 | } |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2308 | } |
| 2309 | |
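/*
 * ATS is only usable when the SMMU advertises it, the master is a PCI
 * device whose root complex was declared ATS-capable by firmware
 * (IOMMU_FWSPEC_PCI_RC_ATS), and ATS hasn't been disabled globally on the
 * kernel command line.
 */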
YueHaibing | 097a7df | 2019-09-03 14:50:56 +0800 | [diff] [blame] | 2310 | #ifdef CONFIG_PCI_ATS |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2311 | static bool arm_smmu_ats_supported(struct arm_smmu_master *master) |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2312 | { |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2313 | struct pci_dev *pdev; |
| 2314 | struct arm_smmu_device *smmu = master->smmu; |
| 2315 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); |
| 2316 | |
| 2317 | if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) || |
| 2318 | !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled()) |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2319 | return false; |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2320 | |
| 2321 | pdev = to_pci_dev(master->dev); |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2322 | return !pdev->untrusted && pdev->ats_cap; |
| 2323 | } |
YueHaibing | 097a7df | 2019-09-03 14:50:56 +0800 | [diff] [blame] | 2324 | #else |
| 2325 | static bool arm_smmu_ats_supported(struct arm_smmu_master *master) |
| 2326 | { |
| 2327 | return false; |
| 2328 | } |
| 2329 | #endif |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2330 | |
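/*
 * Ordering below is deliberate: nr_ats_masters is bumped and the ATC
 * invalidated *before* ATS is enabled at the endpoint, so a concurrent
 * unmap can never miss an ATC that is about to start caching translations.
 */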
| 2331 | static void arm_smmu_enable_ats(struct arm_smmu_master *master) |
| 2332 | { |
| 2333 | size_t stu; |
| 2334 | struct pci_dev *pdev; |
| 2335 | struct arm_smmu_device *smmu = master->smmu; |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 2336 | struct arm_smmu_domain *smmu_domain = master->domain; |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2337 | |
| 2338 | /* Don't enable ATS at the endpoint if it's not enabled in the STE */ |
| 2339 | if (!master->ats_enabled) |
| 2340 | return; |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2341 | |
| 2342 | /* Smallest Translation Unit: log2 of the smallest supported granule */ |
| 2343 | stu = __ffs(smmu->pgsize_bitmap); |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2344 | pdev = to_pci_dev(master->dev); |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2345 | |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 2346 | atomic_inc(&smmu_domain->nr_ats_masters); |
| 2347 | arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2348 | if (pci_enable_ats(pdev, stu)) |
| 2349 | dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2350 | } |
| 2351 | |
| 2352 | static void arm_smmu_disable_ats(struct arm_smmu_master *master) |
| 2353 | { |
Jean-Philippe Brucker | 8dd8f00 | 2019-07-03 12:19:20 +0100 | [diff] [blame] | 2354 | struct arm_smmu_cmdq_ent cmd; |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 2355 | struct arm_smmu_domain *smmu_domain = master->domain; |
Jean-Philippe Brucker | 8dd8f00 | 2019-07-03 12:19:20 +0100 | [diff] [blame] | 2356 | |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2357 | if (!master->ats_enabled) |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2358 | return; |
| 2359 | |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2360 | pci_disable_ats(to_pci_dev(master->dev)); |
| 2361 | /* |
| 2362 | * Ensure ATS is disabled at the endpoint before we issue the |
| 2363 | * ATC invalidation via the SMMU. |
| 2364 | */ |
| 2365 | wmb(); |
Jean-Philippe Brucker | 8dd8f00 | 2019-07-03 12:19:20 +0100 | [diff] [blame] | 2366 | arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd); |
| 2367 | arm_smmu_atc_inv_master(master, &cmd); |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 2368 | atomic_dec(&smmu_domain->nr_ats_masters); |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2369 | } |
| 2370 | |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2371 | static void arm_smmu_detach_dev(struct arm_smmu_master *master) |
Will Deacon | bc7f2ce | 2016-02-17 17:41:57 +0000 | [diff] [blame] | 2372 | { |
Jean-Philippe Brucker | 2a7e62f | 2019-04-17 19:24:46 +0100 | [diff] [blame] | 2373 | unsigned long flags; |
| 2374 | struct arm_smmu_domain *smmu_domain = master->domain; |
| 2375 | |
| 2376 | if (!smmu_domain) |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 2377 | return; |
| 2378 | |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 2379 | arm_smmu_disable_ats(master); |
| 2380 | |
Jean-Philippe Brucker | 2a7e62f | 2019-04-17 19:24:46 +0100 | [diff] [blame] | 2381 | spin_lock_irqsave(&smmu_domain->devices_lock, flags); |
| 2382 | list_del(&master->domain_head); |
| 2383 | spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); |
| 2384 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 2385 | master->domain = NULL; |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2386 | master->ats_enabled = false; |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2387 | arm_smmu_install_ste_for_dev(master); |
Will Deacon | bc7f2ce | 2016-02-17 17:41:57 +0000 | [diff] [blame] | 2388 | } |
| 2389 | |
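/*
 * Attaching is destructive: the master is detached from any previous domain
 * first, the new domain is finalised lazily under init_mutex on first
 * attach, and only then are the CD/STE written and the master added to the
 * domain's device list so ATC invalidations can find it.
 */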
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2390 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
| 2391 | { |
| 2392 | int ret = 0; |
Jean-Philippe Brucker | 2a7e62f | 2019-04-17 19:24:46 +0100 | [diff] [blame] | 2393 | unsigned long flags; |
Joerg Roedel | 9b468f7 | 2018-11-29 14:01:00 +0100 | [diff] [blame] | 2394 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2395 | struct arm_smmu_device *smmu; |
| 2396 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
Jean-Philippe Brucker | b54f426 | 2019-04-17 19:24:43 +0100 | [diff] [blame] | 2397 | struct arm_smmu_master *master; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2398 | |
Joerg Roedel | 9b468f7 | 2018-11-29 14:01:00 +0100 | [diff] [blame] | 2399 | if (!fwspec) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2400 | return -ENOENT; |
| 2401 | |
Joerg Roedel | 9b468f7 | 2018-11-29 14:01:00 +0100 | [diff] [blame] | 2402 | master = fwspec->iommu_priv; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2403 | smmu = master->smmu; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2404 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 2405 | arm_smmu_detach_dev(master); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2406 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2407 | mutex_lock(&smmu_domain->init_mutex); |
| 2408 | |
| 2409 | if (!smmu_domain->smmu) { |
| 2410 | smmu_domain->smmu = smmu; |
| 2411 | ret = arm_smmu_domain_finalise(domain); |
| 2412 | if (ret) { |
| 2413 | smmu_domain->smmu = NULL; |
| 2414 | goto out_unlock; |
| 2415 | } |
| 2416 | } else if (smmu_domain->smmu != smmu) { |
| 2417 | dev_err(dev, |
| 2418 | "cannot attach to SMMU %s (upstream of %s)\n", |
| 2419 | dev_name(smmu_domain->smmu->dev), |
| 2420 | dev_name(smmu->dev)); |
| 2421 | ret = -ENXIO; |
| 2422 | goto out_unlock; |
| 2423 | } |
| 2424 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 2425 | master->domain = smmu_domain; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2426 | |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2427 | if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2428 | master->ats_enabled = arm_smmu_ats_supported(master); |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2429 | |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 2430 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) |
| 2431 | arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg); |
Will Deacon | cbf8277 | 2016-02-18 12:05:57 +0000 | [diff] [blame] | 2432 | |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2433 | arm_smmu_install_ste_for_dev(master); |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 2434 | |
| 2435 | spin_lock_irqsave(&smmu_domain->devices_lock, flags); |
| 2436 | list_add(&master->domain_head, &smmu_domain->devices); |
| 2437 | spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); |
| 2438 | |
Will Deacon | bfff88e | 2019-08-20 14:28:59 +0100 | [diff] [blame] | 2439 | arm_smmu_enable_ats(master); |
Will Deacon | cdb8a3c | 2019-08-20 16:28:54 +0100 | [diff] [blame] | 2440 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2441 | out_unlock: |
| 2442 | mutex_unlock(&smmu_domain->init_mutex); |
| 2443 | return ret; |
| 2444 | } |
| 2445 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2446 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, |
Tom Murphy | 781ca2d | 2019-09-08 09:56:38 -0700 | [diff] [blame] | 2447 | phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2448 | { |
Robin Murphy | 58188af | 2017-06-22 16:53:57 +0100 | [diff] [blame] | 2449 | struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2450 | |
| 2451 | if (!ops) |
| 2452 | return -ENODEV; |
| 2453 | |
Robin Murphy | 58188af | 2017-06-22 16:53:57 +0100 | [diff] [blame] | 2454 | return ops->map(ops, iova, paddr, size, prot); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2455 | } |
| 2456 | |
Will Deacon | 56f8af5 | 2019-07-02 16:44:06 +0100 | [diff] [blame] | 2457 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, |
| 2458 | size_t size, struct iommu_iotlb_gather *gather) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2459 | { |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2460 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 2461 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2462 | |
| 2463 | if (!ops) |
| 2464 | return 0; |
| 2465 | |
Will Deacon | 353e3cf | 2019-08-20 15:12:12 +0100 | [diff] [blame] | 2466 | return ops->unmap(ops, iova, size, gather); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2467 | } |
| 2468 | |
Zhen Lei | 07fdef3 | 2018-09-20 17:10:21 +0100 | [diff] [blame] | 2469 | static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) |
| 2470 | { |
| 2471 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 2472 | |
| 2473 | if (smmu_domain->smmu) |
| 2474 | arm_smmu_tlb_inv_context(smmu_domain); |
| 2475 | } |
| 2476 | |
Will Deacon | 56f8af5 | 2019-07-02 16:44:06 +0100 | [diff] [blame] | 2477 | static void arm_smmu_iotlb_sync(struct iommu_domain *domain, |
| 2478 | struct iommu_iotlb_gather *gather) |
Robin Murphy | 32b1244 | 2017-09-28 15:55:01 +0100 | [diff] [blame] | 2479 | { |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 2480 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
Robin Murphy | 32b1244 | 2017-09-28 15:55:01 +0100 | [diff] [blame] | 2481 | |
Will Deacon | 2af2e72 | 2019-07-02 17:16:33 +0100 | [diff] [blame] | 2482 | arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start, |
| 2483 | gather->pgsize, true, smmu_domain); |
Robin Murphy | 32b1244 | 2017-09-28 15:55:01 +0100 | [diff] [blame] | 2484 | } |
| 2485 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2486 | static phys_addr_t |
| 2487 | arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) |
| 2488 | { |
Robin Murphy | 58188af | 2017-06-22 16:53:57 +0100 | [diff] [blame] | 2489 | struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2490 | |
Sunil Goutham | bdf9592 | 2017-04-25 15:27:52 +0530 | [diff] [blame] | 2491 | if (domain->type == IOMMU_DOMAIN_IDENTITY) |
| 2492 | return iova; |
| 2493 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2494 | if (!ops) |
| 2495 | return 0; |
| 2496 | |
Robin Murphy | 58188af | 2017-06-22 16:53:57 +0100 | [diff] [blame] | 2497 | return ops->iova_to_phys(ops, iova); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2498 | } |
| 2499 | |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2500 | static struct platform_driver arm_smmu_driver; |
| 2501 | |
Lorenzo Pieralisi | 778de07 | 2016-11-21 10:01:38 +0000 | [diff] [blame] | 2502 | static |
| 2503 | struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2504 | { |
Suzuki K Poulose | 67843bb | 2019-07-23 23:18:34 +0100 | [diff] [blame] | 2505 | struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver, |
| 2506 | fwnode); |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2507 | put_device(dev); |
| 2508 | return dev ? dev_get_drvdata(dev) : NULL; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2509 | } |
| 2510 | |
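/*
 * With a 2-level stream table the addressable SID space is
 * num_l1_ents << STRTAB_SPLIT; e.g. 64 L1 entries and an 8-bit split cover
 * SIDs 0-0x3fff (illustrative values only).
 */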
| 2511 | static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) |
| 2512 | { |
| 2513 | unsigned long limit = smmu->strtab_cfg.num_l1_ents; |
| 2514 | |
| 2515 | if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) |
| 2516 | limit *= 1UL << STRTAB_SPLIT; |
| 2517 | |
| 2518 | return sid < limit; |
| 2519 | } |
| 2520 | |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2521 | static struct iommu_ops arm_smmu_ops; |
| 2522 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2523 | static int arm_smmu_add_device(struct device *dev) |
| 2524 | { |
| 2525 | int i, ret; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2526 | struct arm_smmu_device *smmu; |
Jean-Philippe Brucker | b54f426 | 2019-04-17 19:24:43 +0100 | [diff] [blame] | 2527 | struct arm_smmu_master *master; |
Joerg Roedel | 9b468f7 | 2018-11-29 14:01:00 +0100 | [diff] [blame] | 2528 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2529 | struct iommu_group *group; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2530 | |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2531 | if (!fwspec || fwspec->ops != &arm_smmu_ops) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2532 | return -ENODEV; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2533 | /* |
| 2534 | * We _can_ actually withstand dodgy bus code re-calling add_device() |
| 2535 | * without an intervening remove_device()/of_xlate() sequence, but |
| 2536 | * we're not going to do so quietly... |
| 2537 | */ |
| 2538 | if (WARN_ON_ONCE(fwspec->iommu_priv)) { |
| 2539 | master = fwspec->iommu_priv; |
| 2540 | smmu = master->smmu; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2541 | } else { |
Lorenzo Pieralisi | 778de07 | 2016-11-21 10:01:38 +0000 | [diff] [blame] | 2542 | smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2543 | if (!smmu) |
| 2544 | return -ENODEV; |
| 2545 | master = kzalloc(sizeof(*master), GFP_KERNEL); |
| 2546 | if (!master) |
| 2547 | return -ENOMEM; |
| 2548 | |
Jean-Philippe Brucker | 9ce27af | 2019-04-17 19:24:47 +0100 | [diff] [blame] | 2549 | master->dev = dev; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2550 | master->smmu = smmu; |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2551 | master->sids = fwspec->ids; |
| 2552 | master->num_sids = fwspec->num_ids; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2553 | fwspec->iommu_priv = master; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2554 | } |
| 2555 | |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2556 | /* Check the SIDs are in range of the SMMU and our stream table */ |
Jean-Philippe Brucker | bcecaee | 2019-04-17 19:24:44 +0100 | [diff] [blame] | 2557 | for (i = 0; i < master->num_sids; i++) { |
| 2558 | u32 sid = master->sids[i]; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2559 | |
| 2560 | if (!arm_smmu_sid_in_range(smmu, sid)) |
| 2561 | return -ERANGE; |
| 2562 | |
| 2563 | /* Ensure l2 strtab is initialised */ |
| 2564 | if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { |
| 2565 | ret = arm_smmu_init_l2_strtab(smmu, sid); |
| 2566 | if (ret) |
| 2567 | return ret; |
| 2568 | } |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2569 | } |
| 2570 | |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2571 | group = iommu_group_get_for_dev(dev); |
Joerg Roedel | 9648cbc | 2017-02-01 18:11:36 +0100 | [diff] [blame] | 2572 | if (!IS_ERR(group)) { |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2573 | iommu_group_put(group); |
Joerg Roedel | 9648cbc | 2017-02-01 18:11:36 +0100 | [diff] [blame] | 2574 | iommu_device_link(&smmu->iommu, dev); |
| 2575 | } |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2576 | |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2577 | return PTR_ERR_OR_ZERO(group); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2578 | } |
| 2579 | |
| 2580 | static void arm_smmu_remove_device(struct device *dev) |
| 2581 | { |
Joerg Roedel | 9b468f7 | 2018-11-29 14:01:00 +0100 | [diff] [blame] | 2582 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
Jean-Philippe Brucker | b54f426 | 2019-04-17 19:24:43 +0100 | [diff] [blame] | 2583 | struct arm_smmu_master *master; |
Joerg Roedel | 9648cbc | 2017-02-01 18:11:36 +0100 | [diff] [blame] | 2584 | struct arm_smmu_device *smmu; |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2585 | |
| 2586 | if (!fwspec || fwspec->ops != &arm_smmu_ops) |
| 2587 | return; |
| 2588 | |
| 2589 | master = fwspec->iommu_priv; |
Joerg Roedel | 9648cbc | 2017-02-01 18:11:36 +0100 | [diff] [blame] | 2590 | smmu = master->smmu; |
Jean-Philippe Brucker | 8be39a1 | 2019-04-17 19:24:45 +0100 | [diff] [blame] | 2591 | arm_smmu_detach_dev(master); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2592 | iommu_group_remove_device(dev); |
Joerg Roedel | 9648cbc | 2017-02-01 18:11:36 +0100 | [diff] [blame] | 2593 | iommu_device_unlink(&smmu->iommu, dev); |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2594 | kfree(master); |
| 2595 | iommu_fwspec_free(dev); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2596 | } |
| 2597 | |
Robin Murphy | 08d4ca2 | 2016-09-12 17:13:46 +0100 | [diff] [blame] | 2598 | static struct iommu_group *arm_smmu_device_group(struct device *dev) |
| 2599 | { |
| 2600 | struct iommu_group *group; |
| 2601 | |
| 2602 | /* |
| 2603 | * We don't support devices sharing stream IDs other than PCI RID |
| 2604 | * aliases, since the necessary ID-to-device lookup becomes rather |
| 2605 | * impractical given a potential sparse 32-bit stream ID space. |
| 2606 | */ |
| 2607 | if (dev_is_pci(dev)) |
| 2608 | group = pci_device_group(dev); |
| 2609 | else |
| 2610 | group = generic_device_group(dev); |
| 2611 | |
| 2612 | return group; |
| 2613 | } |
| 2614 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2615 | static int arm_smmu_domain_get_attr(struct iommu_domain *domain, |
| 2616 | enum iommu_attr attr, void *data) |
| 2617 | { |
| 2618 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 2619 | |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 2620 | switch (domain->type) { |
| 2621 | case IOMMU_DOMAIN_UNMANAGED: |
| 2622 | switch (attr) { |
| 2623 | case DOMAIN_ATTR_NESTING: |
| 2624 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); |
| 2625 | return 0; |
| 2626 | default: |
| 2627 | return -ENODEV; |
| 2628 | } |
| 2629 | break; |
| 2630 | case IOMMU_DOMAIN_DMA: |
| 2631 | switch (attr) { |
| 2632 | case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: |
| 2633 | *(int *)data = smmu_domain->non_strict; |
| 2634 | return 0; |
| 2635 | default: |
| 2636 | return -ENODEV; |
| 2637 | } |
| 2638 | break; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2639 | default: |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 2640 | return -EINVAL; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2641 | } |
| 2642 | } |
| 2643 | |
| 2644 | static int arm_smmu_domain_set_attr(struct iommu_domain *domain, |
| 2645 | enum iommu_attr attr, void *data) |
| 2646 | { |
| 2647 | int ret = 0; |
| 2648 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 2649 | |
| 2650 | mutex_lock(&smmu_domain->init_mutex); |
| 2651 | |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 2652 | switch (domain->type) { |
| 2653 | case IOMMU_DOMAIN_UNMANAGED: |
| 2654 | switch (attr) { |
| 2655 | case DOMAIN_ATTR_NESTING: |
| 2656 | if (smmu_domain->smmu) { |
| 2657 | ret = -EPERM; |
| 2658 | goto out_unlock; |
| 2659 | } |
| 2660 | |
| 2661 | if (*(int *)data) |
| 2662 | smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; |
| 2663 | else |
| 2664 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; |
| 2665 | break; |
| 2666 | default: |
| 2667 | ret = -ENODEV; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2668 | } |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 2669 | break; |
| 2670 | case IOMMU_DOMAIN_DMA: |
 | 2671 | 		switch (attr) {
| 2672 | case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: |
| 2673 | smmu_domain->non_strict = *(int *)data; |
| 2674 | break; |
| 2675 | default: |
| 2676 | ret = -ENODEV; |
| 2677 | } |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2678 | break; |
| 2679 | default: |
Zhen Lei | 9662b99 | 2018-09-20 17:10:25 +0100 | [diff] [blame] | 2680 | ret = -EINVAL; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2681 | } |
| 2682 | |
| 2683 | out_unlock: |
| 2684 | mutex_unlock(&smmu_domain->init_mutex); |
| 2685 | return ret; |
| 2686 | } |
| 2687 | |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2688 | static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) |
| 2689 | { |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2690 | return iommu_fwspec_add_ids(dev, args->args, 1); |
| 2691 | } |
| 2692 | |
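/*
 * Reserve a software-managed MSI window (IOMMU_RESV_SW_MSI) so that DMA
 * IOVA allocation stays clear of the range the kernel uses for mapping MSI
 * doorbells behind the SMMU.
 */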
Eric Auger | 50019f0 | 2017-01-19 20:57:56 +0000 | [diff] [blame] | 2693 | static void arm_smmu_get_resv_regions(struct device *dev, |
| 2694 | struct list_head *head) |
| 2695 | { |
| 2696 | struct iommu_resv_region *region; |
| 2697 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
| 2698 | |
| 2699 | region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, |
Robin Murphy | 9d3a4de | 2017-03-16 17:00:16 +0000 | [diff] [blame] | 2700 | prot, IOMMU_RESV_SW_MSI); |
Eric Auger | 50019f0 | 2017-01-19 20:57:56 +0000 | [diff] [blame] | 2701 | if (!region) |
| 2702 | return; |
| 2703 | |
| 2704 | list_add_tail(®ion->list, head); |
Robin Murphy | 273df96 | 2017-03-16 17:00:19 +0000 | [diff] [blame] | 2705 | |
| 2706 | iommu_dma_get_resv_regions(dev, head); |
Eric Auger | 50019f0 | 2017-01-19 20:57:56 +0000 | [diff] [blame] | 2707 | } |
| 2708 | |
| 2709 | static void arm_smmu_put_resv_regions(struct device *dev, |
| 2710 | struct list_head *head) |
| 2711 | { |
| 2712 | struct iommu_resv_region *entry, *next; |
| 2713 | |
| 2714 | list_for_each_entry_safe(entry, next, head, list) |
| 2715 | kfree(entry); |
| 2716 | } |
| 2717 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2718 | static struct iommu_ops arm_smmu_ops = { |
| 2719 | .capable = arm_smmu_capable, |
| 2720 | .domain_alloc = arm_smmu_domain_alloc, |
| 2721 | .domain_free = arm_smmu_domain_free, |
| 2722 | .attach_dev = arm_smmu_attach_dev, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2723 | .map = arm_smmu_map, |
| 2724 | .unmap = arm_smmu_unmap, |
Zhen Lei | 07fdef3 | 2018-09-20 17:10:21 +0100 | [diff] [blame] | 2725 | .flush_iotlb_all = arm_smmu_flush_iotlb_all, |
Robin Murphy | 32b1244 | 2017-09-28 15:55:01 +0100 | [diff] [blame] | 2726 | .iotlb_sync = arm_smmu_iotlb_sync, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2727 | .iova_to_phys = arm_smmu_iova_to_phys, |
| 2728 | .add_device = arm_smmu_add_device, |
| 2729 | .remove_device = arm_smmu_remove_device, |
Robin Murphy | 08d4ca2 | 2016-09-12 17:13:46 +0100 | [diff] [blame] | 2730 | .device_group = arm_smmu_device_group, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2731 | .domain_get_attr = arm_smmu_domain_get_attr, |
| 2732 | .domain_set_attr = arm_smmu_domain_set_attr, |
Robin Murphy | 8f78515 | 2016-09-12 17:13:45 +0100 | [diff] [blame] | 2733 | .of_xlate = arm_smmu_of_xlate, |
Eric Auger | 50019f0 | 2017-01-19 20:57:56 +0000 | [diff] [blame] | 2734 | .get_resv_regions = arm_smmu_get_resv_regions, |
| 2735 | .put_resv_regions = arm_smmu_put_resv_regions, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2736 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
| 2737 | }; |
| 2738 | |
| 2739 | /* Probing and initialisation functions */ |
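/*
 * Queue allocation policy: if the ideal queue size can't be allocated
 * contiguously, halve the number of entries (max_n_shift) and retry until
 * the allocation either succeeds or would already fit in a single page, at
 * which point failure is final.
 */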
| 2740 | static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, |
| 2741 | struct arm_smmu_queue *q, |
| 2742 | unsigned long prod_off, |
| 2743 | unsigned long cons_off, |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2744 | size_t dwords, const char *name) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2745 | { |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2746 | size_t qsz; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2747 | |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2748 | do { |
Will Deacon | 52be863 | 2019-07-02 17:16:08 +0100 | [diff] [blame] | 2749 | qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2750 | q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, |
| 2751 | GFP_KERNEL); |
| 2752 | if (q->base || qsz < PAGE_SIZE) |
| 2753 | break; |
| 2754 | |
Will Deacon | 52be863 | 2019-07-02 17:16:08 +0100 | [diff] [blame] | 2755 | q->llq.max_n_shift--; |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2756 | } while (1); |
| 2757 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2758 | if (!q->base) { |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2759 | dev_err(smmu->dev, |
| 2760 | "failed to allocate queue (0x%zx bytes) for %s\n", |
| 2761 | qsz, name); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2762 | return -ENOMEM; |
| 2763 | } |
| 2764 | |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2765 | if (!WARN_ON(q->base_dma & (qsz - 1))) { |
| 2766 | dev_info(smmu->dev, "allocated %u entries for %s\n", |
Will Deacon | 52be863 | 2019-07-02 17:16:08 +0100 | [diff] [blame] | 2767 | 1 << q->llq.max_n_shift, name); |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2768 | } |
| 2769 | |
Linu Cherian | e5b829d | 2017-06-22 17:35:37 +0530 | [diff] [blame] | 2770 | q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); |
| 2771 | q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2772 | q->ent_dwords = dwords; |
| 2773 | |
| 2774 | q->q_base = Q_BASE_RWA; |
Robin Murphy | 1cf9e54 | 2018-03-26 13:35:09 +0100 | [diff] [blame] | 2775 | q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; |
Will Deacon | 52be863 | 2019-07-02 17:16:08 +0100 | [diff] [blame] | 2776 | q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2777 | |
Will Deacon | 52be863 | 2019-07-02 17:16:08 +0100 | [diff] [blame] | 2778 | q->llq.prod = q->llq.cons = 0; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2779 | return 0; |
| 2780 | } |
| 2781 | |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 2782 | static void arm_smmu_cmdq_free_bitmap(void *data) |
| 2783 | { |
| 2784 | unsigned long *bitmap = data; |
| 2785 | bitmap_free(bitmap); |
| 2786 | } |
| 2787 | |
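/*
 * The command queue is lock-free: owner_prod and lock coordinate producer
 * batches, while valid_map carries one bit per queue entry so that a poller
 * can tell whether a slot's producer has finished writing it, with the bit
 * sense flipping on each queue wrap.
 */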
| 2788 | static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) |
| 2789 | { |
| 2790 | int ret = 0; |
| 2791 | struct arm_smmu_cmdq *cmdq = &smmu->cmdq; |
| 2792 | unsigned int nents = 1 << cmdq->q.llq.max_n_shift; |
| 2793 | atomic_long_t *bitmap; |
| 2794 | |
| 2795 | atomic_set(&cmdq->owner_prod, 0); |
| 2796 | atomic_set(&cmdq->lock, 0); |
| 2797 | |
| 2798 | bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL); |
| 2799 | if (!bitmap) { |
| 2800 | dev_err(smmu->dev, "failed to allocate cmdq bitmap\n"); |
| 2801 | ret = -ENOMEM; |
| 2802 | } else { |
| 2803 | cmdq->valid_map = bitmap; |
| 2804 | devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap); |
| 2805 | } |
| 2806 | |
| 2807 | return ret; |
| 2808 | } |
| 2809 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2810 | static int arm_smmu_init_queues(struct arm_smmu_device *smmu) |
| 2811 | { |
| 2812 | int ret; |
| 2813 | |
| 2814 | /* cmdq */ |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2815 | ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2816 | ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS, |
| 2817 | "cmdq"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2818 | if (ret) |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2819 | return ret; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2820 | |
Will Deacon | 587e6c1 | 2019-07-02 17:16:25 +0100 | [diff] [blame] | 2821 | ret = arm_smmu_cmdq_init(smmu); |
| 2822 | if (ret) |
| 2823 | return ret; |
| 2824 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2825 | /* evtq */ |
| 2826 | ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2827 | ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS, |
| 2828 | "evtq"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2829 | if (ret) |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2830 | return ret; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2831 | |
| 2832 | /* priq */ |
| 2833 | if (!(smmu->features & ARM_SMMU_FEAT_PRI)) |
| 2834 | return 0; |
| 2835 | |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2836 | return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD, |
Will Deacon | d25f6ea | 2019-05-16 16:08:47 +0100 | [diff] [blame] | 2837 | ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS, |
| 2838 | "priq"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2839 | } |
| 2840 | |
| 2841 | static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) |
| 2842 | { |
| 2843 | unsigned int i; |
| 2844 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; |
| 2845 | size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents; |
| 2846 | void *strtab = smmu->strtab_cfg.strtab; |
| 2847 | |
| 2848 | cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL); |
| 2849 | if (!cfg->l1_desc) { |
| 2850 | dev_err(smmu->dev, "failed to allocate l1 stream table desc\n"); |
| 2851 | return -ENOMEM; |
| 2852 | } |
| 2853 | |
| 2854 | for (i = 0; i < cfg->num_l1_ents; ++i) { |
| 2855 | arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]); |
| 2856 | strtab += STRTAB_L1_DESC_DWORDS << 3; |
| 2857 | } |
| 2858 | |
| 2859 | return 0; |
| 2860 | } |
| 2861 | |
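/*
 * Worked example of the sizing below, assuming this driver's constants
 * (STRTAB_L1_SZ_SHIFT == 20, single-dword L1 descriptors, STRTAB_SPLIT of
 * 8): the L1 table is capped at 1 << 17 entries, so with 32-bit SIDs only
 * 17 + 8 = 25 bits are covered and the warning fires.
 */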
| 2862 | static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) |
| 2863 | { |
| 2864 | void *strtab; |
| 2865 | u64 reg; |
Will Deacon | d2e88e7 | 2015-06-30 10:02:28 +0100 | [diff] [blame] | 2866 | u32 size, l1size; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2867 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; |
| 2868 | |
Nate Watterson | 692c4e4 | 2017-01-10 14:47:13 -0500 | [diff] [blame] | 2869 | /* Calculate the L1 size, capped to the SIDSIZE. */ |
| 2870 | size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); |
| 2871 | size = min(size, smmu->sid_bits - STRTAB_SPLIT); |
Will Deacon | d2e88e7 | 2015-06-30 10:02:28 +0100 | [diff] [blame] | 2872 | cfg->num_l1_ents = 1 << size; |
| 2873 | |
| 2874 | size += STRTAB_SPLIT; |
| 2875 | if (size < smmu->sid_bits) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2876 | dev_warn(smmu->dev, |
| 2877 | "2-level strtab only covers %u/%u bits of SID\n", |
Will Deacon | d2e88e7 | 2015-06-30 10:02:28 +0100 | [diff] [blame] | 2878 | size, smmu->sid_bits); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2879 | |
Will Deacon | d2e88e7 | 2015-06-30 10:02:28 +0100 | [diff] [blame] | 2880 | l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2881 | strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, |
| 2882 | GFP_KERNEL | __GFP_ZERO); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2883 | if (!strtab) { |
| 2884 | dev_err(smmu->dev, |
| 2885 | "failed to allocate l1 stream table (%u bytes)\n", |
 | 2886 | 			l1size);
| 2887 | return -ENOMEM; |
| 2888 | } |
| 2889 | cfg->strtab = strtab; |
| 2890 | |
| 2891 | /* Configure strtab_base_cfg for 2 levels */ |
Robin Murphy | cbcee19 | 2018-03-26 13:35:10 +0100 | [diff] [blame] | 2892 | reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL); |
| 2893 | reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size); |
| 2894 | reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2895 | cfg->strtab_base_cfg = reg; |
| 2896 | |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2897 | return arm_smmu_init_l1_strtab(smmu); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2898 | } |
| 2899 | |
| 2900 | static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) |
| 2901 | { |
| 2902 | void *strtab; |
| 2903 | u64 reg; |
| 2904 | u32 size; |
| 2905 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; |
| 2906 | |
| 2907 | size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3); |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2908 | strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma, |
| 2909 | GFP_KERNEL | __GFP_ZERO); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2910 | if (!strtab) { |
| 2911 | dev_err(smmu->dev, |
| 2912 | "failed to allocate linear stream table (%u bytes)\n", |
| 2913 | size); |
| 2914 | return -ENOMEM; |
| 2915 | } |
| 2916 | cfg->strtab = strtab; |
| 2917 | cfg->num_l1_ents = 1 << smmu->sid_bits; |
| 2918 | |
| 2919 | /* Configure strtab_base_cfg for a linear table covering all SIDs */ |
Robin Murphy | cbcee19 | 2018-03-26 13:35:10 +0100 | [diff] [blame] | 2920 | reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR); |
| 2921 | reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2922 | cfg->strtab_base_cfg = reg; |
| 2923 | |
| 2924 | arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents); |
| 2925 | return 0; |
| 2926 | } |
| 2927 | |
| 2928 | static int arm_smmu_init_strtab(struct arm_smmu_device *smmu) |
| 2929 | { |
| 2930 | u64 reg; |
| 2931 | int ret; |
| 2932 | |
| 2933 | if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) |
| 2934 | ret = arm_smmu_init_strtab_2lvl(smmu); |
| 2935 | else |
| 2936 | ret = arm_smmu_init_strtab_linear(smmu); |
| 2937 | |
| 2938 | if (ret) |
| 2939 | return ret; |
| 2940 | |
| 2941 | /* Set the strtab base address */ |
Robin Murphy | 1cf9e54 | 2018-03-26 13:35:09 +0100 | [diff] [blame] | 2942 | reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2943 | reg |= STRTAB_BASE_RA; |
| 2944 | smmu->strtab_cfg.strtab_base = reg; |
| 2945 | |
| 2946 | /* Allocate the first VMID for stage-2 bypass STEs */ |
| 2947 | set_bit(0, smmu->vmid_map); |
| 2948 | return 0; |
| 2949 | } |
| 2950 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2951 | static int arm_smmu_init_structures(struct arm_smmu_device *smmu) |
| 2952 | { |
| 2953 | int ret; |
| 2954 | |
| 2955 | ret = arm_smmu_init_queues(smmu); |
| 2956 | if (ret) |
| 2957 | return ret; |
| 2958 | |
Will Deacon | 04fa26c | 2015-10-30 18:12:41 +0000 | [diff] [blame] | 2959 | return arm_smmu_init_strtab(smmu); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 2960 | } |
| 2961 | |
| 2962 | static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val, |
| 2963 | unsigned int reg_off, unsigned int ack_off) |
| 2964 | { |
| 2965 | u32 reg; |
| 2966 | |
| 2967 | writel_relaxed(val, smmu->base + reg_off); |
| 2968 | return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val, |
| 2969 | 1, ARM_SMMU_POLL_TIMEOUT_US); |
| 2970 | } |
| 2971 | |
Robin Murphy | dc87a98 | 2016-09-12 17:13:44 +0100 | [diff] [blame] | 2972 | /* GBPA is "special": no CR0ACK handshake, so poll GBPA_UPDATE around a read-modify-write */
| 2973 | static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) |
| 2974 | { |
| 2975 | int ret; |
| 2976 | u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA; |
| 2977 | |
| 2978 | ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), |
| 2979 | 1, ARM_SMMU_POLL_TIMEOUT_US); |
| 2980 | if (ret) |
| 2981 | return ret; |
| 2982 | |
| 2983 | reg &= ~clr; |
| 2984 | reg |= set; |
| 2985 | writel_relaxed(reg | GBPA_UPDATE, gbpa); |
Will Deacon | b63b343 | 2018-07-25 15:58:43 +0100 | [diff] [blame] | 2986 | ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), |
| 2987 | 1, ARM_SMMU_POLL_TIMEOUT_US); |
| 2988 | |
| 2989 | if (ret) |
| 2990 | dev_err(smmu->dev, "GBPA not responding to update\n"); |
| 2991 | return ret; |
Robin Murphy | dc87a98 | 2016-09-12 17:13:44 +0100 | [diff] [blame] | 2992 | } |
| 2993 | |
Marc Zyngier | 166bdbd | 2015-10-13 18:32:30 +0100 | [diff] [blame] | 2994 | static void arm_smmu_free_msis(void *data) |
| 2995 | { |
| 2996 | struct device *dev = data; |
| 2997 | platform_msi_domain_free_irqs(dev); |
| 2998 | } |
| 2999 | |
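/*
 * Each SMMU MSI (evtq, gerror, priq) is backed by a triplet of registers:
 * doorbell address, payload and memory attributes. arm_smmu_msi_cfg[] maps
 * the platform MSI index to those offsets, so writing the message is just
 * three relaxed MMIO writes.
 */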
| 3000 | static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) |
| 3001 | { |
| 3002 | phys_addr_t doorbell; |
| 3003 | struct device *dev = msi_desc_to_dev(desc); |
| 3004 | struct arm_smmu_device *smmu = dev_get_drvdata(dev); |
| 3005 | phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index]; |
| 3006 | |
| 3007 | doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; |
Robin Murphy | 1cf9e54 | 2018-03-26 13:35:09 +0100 | [diff] [blame] | 3008 | doorbell &= MSI_CFG0_ADDR_MASK; |
Marc Zyngier | 166bdbd | 2015-10-13 18:32:30 +0100 | [diff] [blame] | 3009 | |
| 3010 | writeq_relaxed(doorbell, smmu->base + cfg[0]); |
| 3011 | writel_relaxed(msg->data, smmu->base + cfg[1]); |
Robin Murphy | cbcee19 | 2018-03-26 13:35:10 +0100 | [diff] [blame] | 3012 | writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); |
Marc Zyngier | 166bdbd | 2015-10-13 18:32:30 +0100 | [diff] [blame] | 3013 | } |
| 3014 | |
| 3015 | static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) |
| 3016 | { |
| 3017 | struct msi_desc *desc; |
| 3018 | int ret, nvec = ARM_SMMU_MAX_MSIS; |
| 3019 | struct device *dev = smmu->dev; |
| 3020 | |
| 3021 | /* Clear the MSI address regs */ |
| 3022 | writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); |
| 3023 | writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0); |
| 3024 | |
| 3025 | if (smmu->features & ARM_SMMU_FEAT_PRI) |
| 3026 | writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); |
| 3027 | else |
| 3028 | nvec--; |
| 3029 | |
| 3030 | if (!(smmu->features & ARM_SMMU_FEAT_MSI)) |
| 3031 | return; |
| 3032 | |
Nate Watterson | 940ded9 | 2018-01-20 13:08:04 -0500 | [diff] [blame] | 3033 | if (!dev->msi_domain) { |
| 3034 | dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n"); |
| 3035 | return; |
| 3036 | } |
| 3037 | |
Marc Zyngier | 166bdbd | 2015-10-13 18:32:30 +0100 | [diff] [blame] | 3038 | /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */ |
| 3039 | ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg); |
| 3040 | if (ret) { |
Nate Watterson | 940ded9 | 2018-01-20 13:08:04 -0500 | [diff] [blame] | 3041 | dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n"); |
Marc Zyngier | 166bdbd | 2015-10-13 18:32:30 +0100 | [diff] [blame] | 3042 | return; |
| 3043 | } |
| 3044 | |
| 3045 | for_each_msi_entry(desc, dev) { |
| 3046 | switch (desc->platform.msi_index) { |
| 3047 | case EVTQ_MSI_INDEX: |
| 3048 | smmu->evtq.q.irq = desc->irq; |
| 3049 | break; |
| 3050 | case GERROR_MSI_INDEX: |
| 3051 | smmu->gerr_irq = desc->irq; |
| 3052 | break; |
| 3053 | case PRIQ_MSI_INDEX: |
| 3054 | smmu->priq.q.irq = desc->irq; |
| 3055 | break; |
| 3056 | default: /* Unknown */ |
| 3057 | continue; |
| 3058 | } |
| 3059 | } |
| 3060 | |
| 3061 | /* Add callback to free MSIs on teardown */ |
| 3062 | devm_add_action(dev, arm_smmu_free_msis, dev); |
| 3063 | } |
| 3064 | |
Geetha Sowjanya | f935448 | 2017-06-23 19:04:36 +0530 | [diff] [blame] | 3065 | static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3066 | { |
Geetha Sowjanya | f935448 | 2017-06-23 19:04:36 +0530 | [diff] [blame] | 3067 | int irq, ret; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3068 | |
Marc Zyngier | 166bdbd | 2015-10-13 18:32:30 +0100 | [diff] [blame] | 3069 | arm_smmu_setup_msis(smmu); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3070 | |
Marc Zyngier | 166bdbd | 2015-10-13 18:32:30 +0100 | [diff] [blame] | 3071 | /* Request interrupt lines */ |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3072 | irq = smmu->evtq.q.irq; |
| 3073 | if (irq) { |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 3074 | ret = devm_request_threaded_irq(smmu->dev, irq, NULL, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3075 | arm_smmu_evtq_thread, |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 3076 | IRQF_ONESHOT, |
| 3077 | "arm-smmu-v3-evtq", smmu); |
Arnd Bergmann | 287980e | 2016-05-27 23:23:25 +0200 | [diff] [blame] | 3078 | if (ret < 0) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3079 | dev_warn(smmu->dev, "failed to enable evtq irq\n"); |
Robin Murphy | 4c8996d | 2017-10-30 12:14:02 +0000 | [diff] [blame] | 3080 | } else { |
| 3081 | dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3082 | } |
| 3083 | |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3084 | irq = smmu->gerr_irq; |
| 3085 | if (irq) { |
| 3086 | ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, |
| 3087 | 0, "arm-smmu-v3-gerror", smmu); |
Arnd Bergmann | 287980e | 2016-05-27 23:23:25 +0200 | [diff] [blame] | 3088 | if (ret < 0) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3089 | dev_warn(smmu->dev, "failed to enable gerror irq\n"); |
Robin Murphy | 4c8996d | 2017-10-30 12:14:02 +0000 | [diff] [blame] | 3090 | } else { |
| 3091 | dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3092 | } |
| 3093 | |
| 3094 | if (smmu->features & ARM_SMMU_FEAT_PRI) { |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3095 | irq = smmu->priq.q.irq; |
| 3096 | if (irq) { |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 3097 | ret = devm_request_threaded_irq(smmu->dev, irq, NULL, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3098 | arm_smmu_priq_thread, |
Jean-Philippe Brucker | b4163fb | 2016-08-22 14:42:24 +0100 | [diff] [blame] | 3099 | IRQF_ONESHOT, |
| 3100 | "arm-smmu-v3-priq", |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3101 | smmu); |
Arnd Bergmann | 287980e | 2016-05-27 23:23:25 +0200 | [diff] [blame] | 3102 | if (ret < 0) |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3103 | dev_warn(smmu->dev, |
| 3104 | "failed to enable priq irq\n"); |
Robin Murphy | 4c8996d | 2017-10-30 12:14:02 +0000 | [diff] [blame] | 3105 | } else { |
| 3106 | dev_warn(smmu->dev, "no priq irq - PRI will be broken\n"); |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3107 | } |
| 3108 | } |
Geetha Sowjanya | f935448 | 2017-06-23 19:04:36 +0530 | [diff] [blame] | 3109 | } |
| 3110 | |
| 3111 | static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) |
| 3112 | { |
| 3113 | int ret, irq; |
| 3114 | u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; |
| 3115 | |
| 3116 | /* Disable IRQs first */ |
| 3117 | ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL, |
| 3118 | ARM_SMMU_IRQ_CTRLACK); |
| 3119 | if (ret) { |
| 3120 | dev_err(smmu->dev, "failed to disable irqs\n"); |
| 3121 | return ret; |
| 3122 | } |
| 3123 | |
| 3124 | irq = smmu->combined_irq; |
| 3125 | if (irq) { |
| 3126 | /* |
John Garry | 657135f | 2018-08-17 23:42:22 +0800 | [diff] [blame] | 3127 | * Cavium ThunderX2 implementation doesn't support unique irq |
| 3128 | * lines. Use a single irq line for all the SMMUv3 interrupts. |
Geetha Sowjanya | f935448 | 2017-06-23 19:04:36 +0530 | [diff] [blame] | 3129 | */ |
| 3130 | ret = devm_request_threaded_irq(smmu->dev, irq, |
| 3131 | arm_smmu_combined_irq_handler, |
| 3132 | arm_smmu_combined_irq_thread, |
| 3133 | IRQF_ONESHOT, |
| 3134 | "arm-smmu-v3-combined-irq", smmu); |
| 3135 | if (ret < 0) |
| 3136 | dev_warn(smmu->dev, "failed to enable combined irq\n"); |
| 3137 | } else |
| 3138 | arm_smmu_setup_unique_irqs(smmu); |
| 3139 | |
| 3140 | if (smmu->features & ARM_SMMU_FEAT_PRI) |
| 3141 | irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3142 | |
| 3143 | /* Enable interrupt generation on the SMMU */ |
Marc Zyngier | ccd6385 | 2015-07-15 11:55:18 +0100 | [diff] [blame] | 3144 | ret = arm_smmu_write_reg_sync(smmu, irqen_flags, |
Will Deacon | 48ec83b | 2015-05-27 17:25:59 +0100 | [diff] [blame] | 3145 | ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK); |
| 3146 | if (ret) |
| 3147 | dev_warn(smmu->dev, "failed to enable irqs\n"); |
| 3148 | |
| 3149 | return 0; |
| 3150 | } |
| 3151 | |
| 3152 | static int arm_smmu_device_disable(struct arm_smmu_device *smmu) |
| 3153 | { |
| 3154 | int ret; |
| 3155 | |
| 3156 | ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK); |
| 3157 | if (ret) |
| 3158 | dev_err(smmu->dev, "failed to clear cr0\n"); |
| 3159 | |
| 3160 | return ret; |
| 3161 | } |
| 3162 | |
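/*
 * Reset sequence: quiesce the SMMU (forcing GBPA to abort if it was left
 * enabled, e.g. by a crashed kernel), program table/queue attributes and
 * base registers, then enable the command queue first so the CFGI/TLBI
 * invalidations below can be issued before the other queues come up.
 */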
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN) {
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
		WARN_ON(is_kdump_kernel() && !disable_bypass);
		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
	}

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (private TLB maintenance, record invalid SIDs, E2H regime) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.llq.prod,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
	writel_relaxed(smmu->evtq.q.llq.cons,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.llq.prod,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
		writel_relaxed(smmu->priq.q.llq.cons,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	if (smmu->features & ARM_SMMU_FEAT_ATS) {
		enables |= CR0_ATSCHK;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable ATS check\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	if (is_kdump_kernel())
		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret)
			return ret;
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}

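/*
 * Read the IDR registers and translate what the hardware advertises
 * into ARM_SMMU_FEAT_* flags, queue size limits and input/output
 * address sizes for the rest of the driver to consume.
 */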
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
			 coherent ? "true" : "false");

	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		/* Fallthrough */
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (FIELD_GET(IDR0_TTF, reg)) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped to ensure natural alignment */
	smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_CMDQS, reg));
	if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
		/*
		 * We don't support splitting up batches, so one batch of
		 * commands plus an extra sync needs to fit inside the command
		 * queue. There's also no way we can handle the weird alignment
		 * restrictions on the base pointer for a unit-length queue.
		 */
		dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
			CMDQ_BATCH_ENTRIES);
		return -ENXIO;
	}

	smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_EVTQS, reg));
	smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_PRIQS, reg));

	/* SID/SSID sizes */
	smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
	smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
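	/*
	 * Each granule above contributes both its page size and the block
	 * sizes a single table descriptor can map at that granule (2M/1G
	 * for 4K, 32M for 16K, 512M for 64K).
	 */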

	/* Input address size */
	if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
		smmu->features |= ARM_SMMU_FEAT_VAX;

	/* Output address size */
	switch (FIELD_GET(IDR5_OAS, reg)) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	case IDR5_OAS_52_BIT:
		smmu->oas = 52;
		smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
		break;
	default:
		dev_info(smmu->dev,
			 "unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}

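/*
 * Firmware probing: on ACPI systems the SMMUv3 is described by an IORT
 * node (including model-specific quirks), on DT systems by an
 * "arm,smmu-v3" node. Either path may also mark the device as
 * cache-coherent, which takes precedence over IDR0.COHACC above.
 */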
#ifdef CONFIG_ACPI
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
{
	switch (model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}

static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}

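/*
 * Hook the given ops into every bus type we can master: PCI, AMBA and
 * the platform bus. On failure, unwind whatever was already installed
 * so the buses stay consistent; passing ops == NULL tears it all down.
 */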
static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
{
	int err;

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != ops) {
		err = bus_set_iommu(&pci_bus_type, ops);
		if (err)
			return err;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != ops) {
		err = bus_set_iommu(&amba_bustype, ops);
		if (err)
			goto err_reset_pci_ops;
	}
#endif
	if (platform_bus_type.iommu_ops != ops) {
		err = bus_set_iommu(&platform_bus_type, ops);
		if (err)
			goto err_reset_amba_ops;
	}

	return 0;

err_reset_amba_ops:
#ifdef CONFIG_ARM_AMBA
	bus_set_iommu(&amba_bustype, NULL);
#endif
err_reset_pci_ops: __maybe_unused;
#ifdef CONFIG_PCI
	bus_set_iommu(&pci_bus_type, NULL);
#endif
	return err;
}

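/*
 * Main probe path: allocate the device structure, discover the SMMU via
 * DT or ACPI, map the MMIO region, wire up the interrupt lines, probe
 * the hardware, allocate the in-memory structures, reset the device and
 * finally register with the IOMMU core and the bus types.
 */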
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname_optional(pdev, "combined");
	if (irq > 0) {
		smmu->combined_irq = irq;
	} else {
		irq = platform_get_irq_byname_optional(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname_optional(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname_optional(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	return arm_smmu_set_bus_ops(&arm_smmu_ops);
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_set_bus_ops(NULL);
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);
	arm_smmu_device_disable(smmu);

	return 0;
}

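/*
 * On shutdown (e.g. ahead of kexec), tear down as for remove so the
 * SMMU is left disabled rather than translating with stale tables.
 */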
static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu-v3",
		.of_match_table		= arm_smmu_of_match,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
MODULE_ALIAS("platform:arm-smmu-v3");
MODULE_LICENSE("GPL v2");