// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)
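
/* Canaries occupy the 32-bit slot(s) immediately below a region's base
 * offset.  For a hypothetical region at offset 0x140 with canary_count 2,
 * the layout would be:
 *
 *	0x138:	0xdeadbeef	(canary)
 *	0x13c:	0xdeadbeef	(canary)
 *	0x140:	start of region data
 *
 * A canary found overwritten indicates that the region preceding it
 * has overflowed.
 */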

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM	1

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        dma_addr_t addr = ipa->zero_addr;

        if (!mem->size)
                return;

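        /* The final (true) argument indicates the transfer is toward IPA
         * local memory: mem->size bytes are copied from the pre-allocated
         * all-zero DMA buffer at ipa->zero_addr into the region.
         */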
        ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa: IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory.  If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
        dma_addr_t addr = ipa->zero_addr;
        struct gsi_trans *trans;
        u32 offset;
        u16 size;

        /* Get a transaction to define the header memory region and to zero
         * the processing context and modem memory regions.  That's one
         * header-init command plus three zeroing commands, hence the 4.
         */
        trans = ipa_cmd_trans_alloc(ipa, 4);
        if (!trans) {
                dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
                return -EBUSY;
        }

        /* Initialize IPA-local header memory.  The modem and AP header
         * regions are contiguous, and initialized together.
         */
        offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
        size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
        size += ipa->mem[IPA_MEM_AP_HEADER].size;

        ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

        ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

        ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);

        ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

        gsi_trans_commit_wait(trans);

        /* Tell the hardware where the processing context area is located */
        iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset,
                  ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);

        return 0;
}

void ipa_mem_teardown(struct ipa *ipa)
{
        /* Nothing to do */
}

#ifdef IPA_VALIDATE

static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
        const struct ipa_mem *mem = &ipa->mem[mem_id];
        struct device *dev = &ipa->pdev->dev;
        u16 size_multiple;

        /* The modem memory region size must be a multiple of 4 bytes;
         * all other region sizes must be a multiple of 8.
         */
        size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
        if (mem->size % size_multiple)
                dev_err(dev, "region %u size not a multiple of %u bytes\n",
                        mem_id, size_multiple);
        else if (mem->offset % 8)
                dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
        else if (mem->offset < mem->canary_count * sizeof(__le32))
                dev_err(dev, "region %u offset too small for %hu canaries\n",
                        mem_id, mem->canary_count);
        else if (mem->offset + mem->size > ipa->mem_size)
                dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
                        mem_id, ipa->mem_size);
        else
                return true;

        return false;
}
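
/* As a hypothetical example, a region defined as
 *
 *	{ .offset = 0x140, .size = 0x48, .canary_count = 2, }
 *
 * passes all of the checks above (assuming it fits within ipa->mem_size):
 * the size is a multiple of 8, the offset is 8-byte aligned, and there is
 * room below the offset for two 4-byte canaries.
 */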

#else /* !IPA_VALIDATE */

static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
        return true;
}

#endif /* !IPA_VALIDATE */

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa: IPA pointer
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
        struct device *dev = &ipa->pdev->dev;
        enum ipa_mem_id mem_id;
        dma_addr_t addr;
        u32 mem_size;
        void *virt;
        u32 val;

        /* Check the advertised location and size of the shared memory area */
        val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);

        /* The fields in the register are in 8 byte units */
        ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
        mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);

        /* Make sure the end of the advertised area is within the mapped
         * space.  If the advertised size is smaller, limit our idea of
         * the memory size; if it's larger, just note the difference.
         */
        if (ipa->mem_offset + mem_size < ipa->mem_size) {
                dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
                         mem_size);
                ipa->mem_size = mem_size;
        } else if (ipa->mem_offset + mem_size > ipa->mem_size) {
                dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
                        mem_size);
        }
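
        /* As a hypothetical example of the decode above: if the register
         * reads 0x00280000, and SHARED_MEM_BADDR occupies its low 16 bits
         * with SHARED_MEM_SIZE in its high 16 bits (the field masks are
         * defined in "ipa_reg.h"), then mem_offset is 8 * 0x0000 = 0 and
         * mem_size is 8 * 0x0028 = 320 bytes.
         */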

        /* Prealloc DMA memory for zeroing regions */
        virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;
        ipa->zero_addr = addr;
        ipa->zero_virt = virt;
        ipa->zero_size = IPA_MEM_MAX;

        /* Verify each defined memory region is valid, and if indicated
         * for the region, write "canary" values in the space prior to
         * the region's base address.
         */
        for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
                const struct ipa_mem *mem = &ipa->mem[mem_id];
                u16 canary_count;
                __le32 *canary;

                /* Validate all regions (even undefined ones) */
                if (!ipa_mem_valid(ipa, mem_id))
                        goto err_dma_free;

                /* Skip over undefined regions */
                if (!mem->offset && !mem->size)
                        continue;

                canary_count = mem->canary_count;
                if (!canary_count)
                        continue;

                /* Write canary values in the space before the region */
                canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
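                /* canary points at the region's base; the pre-decrement
                 * below writes the slots immediately before it.  The loop
                 * is safe because canary_count is known to be nonzero here.
                 */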
                do
                        *--canary = IPA_MEM_CANARY_VAL;
                while (--canary_count);
        }

        /* Make sure filter and route table memory regions are valid */
        if (!ipa_table_valid(ipa))
                goto err_dma_free;

        /* Validate memory-related properties relevant to immediate commands */
        if (!ipa_cmd_data_valid(ipa))
                goto err_dma_free;

        /* Verify the microcontroller ring alignment (0 is OK too) */
        if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
                dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
                goto err_dma_free;
        }

        return 0;

err_dma_free:
        dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

        return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
        struct device *dev = &ipa->pdev->dev;

        dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
        ipa->zero_size = 0;
        ipa->zero_virt = NULL;
        ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa: IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem.  These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them.  A QMI message tells the
 * modem where to find the regions of IPA local memory it needs to know
 * about (including these).
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
        struct gsi_trans *trans;

        /* Get a transaction to zero the modem memory, modem header,
         * and modem processing context regions.
         */
        trans = ipa_cmd_trans_alloc(ipa, 3);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction to zero modem memory\n");
                return -EBUSY;
        }

        ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);

        ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

        ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

        gsi_trans_commit_wait(trans);

        return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa: IPA pointer
 * @addr: Physical address of the IPA region in IMEM
 * @size: Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use.  The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
        struct device *dev = &ipa->pdev->dev;
        struct iommu_domain *domain;
        unsigned long iova;
        phys_addr_t phys;
        int ret;

        if (!size)
                return 0;	/* IMEM memory not used */

        domain = iommu_get_domain_for_dev(dev);
        if (!domain) {
                dev_err(dev, "no IOMMU domain found for IMEM\n");
                return -EINVAL;
        }

        /* Align the address down and the size up to page boundaries */
        phys = addr & PAGE_MASK;
        size = PAGE_ALIGN(size + addr - phys);
        iova = phys;	/* We just want a direct mapping */
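
        /* For a hypothetical example with 4 KiB pages: addr = 0x146bd800,
         * size = 0x2000 gives phys = 0x146bd000 and a mapped size of
         * PAGE_ALIGN(0x2000 + 0x800) = 0x3000, covering the whole region.
         */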

        ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        ipa->imem_iova = iova;
        ipa->imem_size = size;

        return 0;
}

static void ipa_imem_exit(struct ipa *ipa)
{
        struct iommu_domain *domain;
        struct device *dev;

        if (!ipa->imem_size)
                return;

        dev = &ipa->pdev->dev;
        domain = iommu_get_domain_for_dev(dev);
        if (domain) {
                size_t size;

                size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
                if (size != ipa->imem_size)
                        dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
                                 size, ipa->imem_size);
        } else {
                dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
        }

        ipa->imem_size = 0;
        ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa: IPA pointer
 * @item: Item ID of SMEM memory
 * @size: Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated.  One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the SMEM item (if it
 * isn't already allocated) and map it for access through the IOMMU.
 *
 * Note: @size and the item's address are not guaranteed to be
 * page-aligned.
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
        struct device *dev = &ipa->pdev->dev;
        struct iommu_domain *domain;
        unsigned long iova;
        phys_addr_t phys;
        phys_addr_t addr;
        size_t actual;
        void *virt;
        int ret;

        if (!size)
                return 0;	/* SMEM memory not used */

        /* SMEM is memory shared between the AP and another system entity
         * (in this case, the modem).  An allocation from SMEM is persistent
         * until the AP reboots; there is no way to free an allocated SMEM
         * region.  Allocation only reserves the space; to use it you need
         * to "get" a pointer to it (this implies no reference counting).
         * The item might have already been allocated, in which case we
         * use it unless its size isn't what we expect.
         */
        ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
        if (ret && ret != -EEXIST) {
                dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
                        ret, size, item);
                return ret;
        }

        /* Now get the address of the SMEM memory region */
        virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
        if (IS_ERR(virt)) {
                ret = PTR_ERR(virt);
                dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
                return ret;
        }

        /* If the item was already allocated, ret is still -EEXIST here;
         * in that case, verify the item has the size we expect.
         */
        if (ret && actual != size) {
                dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
                        item, actual, size);
                return -EINVAL;
        }

        domain = iommu_get_domain_for_dev(dev);
        if (!domain) {
                dev_err(dev, "no IOMMU domain found for SMEM\n");
                return -EINVAL;
        }

        /* Align the address down and the size up to a page boundary.
         * The item's address must keep its offset within the page so
         * the size adjustment below accounts for it.
         */
        addr = qcom_smem_virt_to_phys(virt);
        phys = addr & PAGE_MASK;
        size = PAGE_ALIGN(size + addr - phys);
        iova = phys;	/* We just want a direct mapping */

        ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        ipa->smem_iova = iova;
        ipa->smem_size = size;

        return 0;
}

static void ipa_smem_exit(struct ipa *ipa)
{
        struct device *dev = &ipa->pdev->dev;
        struct iommu_domain *domain;

        domain = iommu_get_domain_for_dev(dev);
        if (domain) {
                size_t size;

                size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
                if (size != ipa->smem_size)
                        dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
                                 size, ipa->smem_size);
        } else {
                dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
        }

        ipa->smem_size = 0;
        ipa->smem_iova = 0;
}


/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
        struct device *dev = &ipa->pdev->dev;
        struct resource *res;
        int ret;

        if (mem_data->local_count > IPA_MEM_COUNT) {
                dev_err(dev, "too many memory regions (%u > %u)\n",
                        mem_data->local_count, IPA_MEM_COUNT);
                return -EINVAL;
        }

        ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(dev, "error %d setting DMA mask\n", ret);
                return ret;
        }

        res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
                                           "ipa-shared");
        if (!res) {
                dev_err(dev,
                        "DT error getting \"ipa-shared\" memory property\n");
                return -ENODEV;
        }

        ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
        if (!ipa->mem_virt) {
                dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
                return -ENOMEM;
        }

        ipa->mem_addr = res->start;
        ipa->mem_size = resource_size(res);

        /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
        ipa->mem = mem_data->local;

        ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
        if (ret)
                goto err_unmap;

        ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
        if (ret)
                goto err_imem_exit;

        return 0;

err_imem_exit:
        ipa_imem_exit(ipa);
err_unmap:
        memunmap(ipa->mem_virt);

        return ret;
}

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
        ipa_smem_exit(ipa);
        ipa_imem_exit(ipa);
        memunmap(ipa->mem_virt);
}