// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)
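
/* Each canary occupies one 32-bit slot immediately preceding its
 * region's base offset (see the canary loop in ipa_mem_config()).
 */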

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM	1

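/* Return the description of the memory region with the given ID,
 * or a null pointer if the region is not defined.
 */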
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;
}

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory. This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory. If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * There is no need for a matching ipa_mem_teardown() function.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	const struct ipa_mem *mem;
	struct gsi_trans *trans;
	u32 offset;
	u16 size;
	u32 val;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory. The AP header region, if
	 * present, is contiguous with and follows the modem header region,
	 * and they are initialized together.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
	offset = ipa->mem_offset + mem->offset;
	val = proc_cntxt_base_addr_encoded(ipa->version, offset);
	iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);

	return 0;
}

/* Is the given memory region ID valid for the current IPA version? */
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	enum ipa_version version = ipa->version;

	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_AP_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
	case IPA_MEM_UC_EVENT_RING:
	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_QUOTA_AP:
	case IPA_MEM_END_MARKER:	/* pseudo region */
		break;

	case IPA_MEM_STATS_TETHERING:
	case IPA_MEM_STATS_DROP:
		if (version < IPA_VERSION_4_0)
			return false;
		break;

	case IPA_MEM_STATS_V4_FILTER:
	case IPA_MEM_STATS_V6_FILTER:
	case IPA_MEM_STATS_V4_ROUTE:
	case IPA_MEM_STATS_V6_ROUTE:
		if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
			return false;
		break;

	case IPA_MEM_NAT_TABLE:
	case IPA_MEM_STATS_FILTER_ROUTE:
		if (version < IPA_VERSION_4_5)
			return false;
		break;

	default:
		return false;
	}

	return true;
}

/* Must the given memory region be present in the configuration? */
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
		return true;

	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_TETHERING:
		return ipa->version >= IPA_VERSION_4_0;

	default:
		return false;		/* Anything else is optional */
	}
}

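/* Validate the ID, size, offset, and canary count of one defined region */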
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id = mem->id;
	u16 size_multiple;

	/* Make sure the memory region is valid for this version of IPA */
	if (!ipa_mem_id_valid(ipa, mem_id)) {
		dev_err(dev, "region id %u not valid\n", mem_id);
		return false;
	}

	if (!mem->size && !mem->canary_count) {
		dev_err(dev, "empty memory region %u\n", mem_id);
		return false;
	}

	/* Sizes must be a multiple of 8 bytes (4 bytes for modem memory) */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem_id == IPA_MEM_END_MARKER && mem->size)
		dev_err(dev, "non-zero end marker region size\n");
	else
		return true;

	return false;
}

/* Verify each defined memory region is valid. */
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id;
	u32 i;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return false;
	}

	for (i = 0; i < mem_data->local_count; i++) {
		const struct ipa_mem *mem = &mem_data->local[i];

		if (__test_and_set_bit(mem->id, regions)) {
			dev_err(dev, "duplicate memory region %u\n", mem->id);
			return false;
		}

		/* Defined regions have non-zero size and/or canary count */
		if (!ipa_mem_valid_one(ipa, mem))
			return false;
	}

	/* Now see if any required regions are not defined */
	for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
		if (ipa_mem_id_required(ipa, mem_id))
			dev_err(dev, "required memory region %u missing\n",
				mem_id);
	}

	return true;
}

/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 limit = ipa->mem_size;
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->offset + mem->size <= limit)
			continue;

		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem->id, limit);

		return false;
	}

	return true;
}

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_mem *mem;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;
	u32 i;

	/* Check the advertised location and size of the shared memory area */
	val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
	/* Make sure the end is within the region's mapped space */
	mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);

	/* If the sizes don't match, issue a warning */
	if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	}

	/* We know our memory size; make sure regions are all in range */
	if (!ipa_mem_size_valid(ipa))
		return -EINVAL;

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* For each defined region, write "canary" values in the
	 * space prior to the region's base address if indicated.
	 */
	for (i = 0; i < ipa->mem_count; i++) {
		u16 canary_count = ipa->mem[i].canary_count;
		__le32 *canary;

		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Make sure filter and route table memory regions are valid */
	if (!ipa_table_valid(ipa))
		goto err_dma_free;

	/* Validate memory-related properties relevant to immediate commands */
	if (!ipa_cmd_data_valid(ipa))
		goto err_dma_free;

	/* Verify the microcontroller ring alignment (if defined) */
	mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
	if (mem && mem->offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem. These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them. A QMI message tells the
 * modem where to find regions of IPA local memory it needs to know about
 * (these regions included).
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @addr:	Physical address of the IPA region in IMEM
 * @size:	Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use. The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}

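/* Inverse of ipa_imem_init() */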
static void ipa_imem_exit(struct ipa *ipa)
{
	struct iommu_domain *domain;
	struct device *dev;

	if (!ipa->imem_size)
		return;

	dev = &ipa->pdev->dev;
	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @item:	Item ID of SMEM memory
 * @size:	Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated. One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the SMEM item and map
 * it for access through the IOMMU.
 *
 * Note: @size and the item address are not guaranteed to be page-aligned.
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem). An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region. Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this does not imply reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* In case the region was already allocated, verify the size */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}

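/* Inverse of ipa_smem_init() */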
static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	struct device *dev = &ipa->pdev->dev;
	struct resource *res;
	int ret;

	/* Make sure the set of defined memory regions is valid */
	if (!ipa_mem_valid(ipa, mem_data))
		return -EINVAL;

	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
					   "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}