// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm self-authenticating modem subsystem remoteproc driver
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"

#include <linux/qcom_scm.h>

#define MPSS_CRASH_REASON_SMEM		421

#define MBA_LOG_SIZE			SZ_4K

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6V6SS_MEM_PWR_CTL		0x034
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000

/* QACCEPT Register Offsets */
#define QACCEPT_ACCEPT_REG		0x0
#define QACCEPT_ACTIVE_REG		0x4
#define QACCEPT_DENY_REG		0x8
#define QACCEPT_REQ_REG			0xC

#define QACCEPT_TIMEOUT_US		50

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define BOOT_FSM_TIMEOUT		10000

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *fallback_proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	bool has_qaccept_regs;
	bool has_ext_cntl_regs;
	bool has_vq6;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	struct regmap *conn_map;

	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_vq6;
	u32 conn_box;

	u32 qaccept_mdm;
	u32 qaccept_cx;
	u32 qaccept_axi;

	u32 axim1_clk_off;
	u32 crypto_clk_off;
	u32 force_clk_on;
	u32 rscc_disable;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[1];
	struct reg_info fallback_proxy_regs[2];
	int active_reg_count;
	int proxy_reg_count;
	int fallback_proxy_reg_count;

	bool dump_mba_loaded;
	size_t current_dump_size;
	size_t total_dump_size;

	phys_addr_t mba_phys;
	size_t mba_size;
	size_t dp_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	bool has_qaccept_regs;
	bool has_ext_cntl_regs;
	bool has_vq6;
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;
	int version;
};

enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SC7280,
	MSS_SDM845,
};

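/*
 * Look up the regulators named in @reg_res and record the voltage/load
 * votes to apply when they are enabled.  Returns the number of supplies
 * found, or a negative errno from devm_regulator_get().
 */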
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

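/*
 * Apply the recorded voltage and load votes and enable each supply in
 * @regs.  On failure the supplies handled so far are unwound in reverse
 * order; q6v5_regulator_disable() performs the same unwinding for the
 * full set.
 */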
static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

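/*
 * Simple bulk helpers for the proxy/reset/active clock arrays: the enable
 * path prepares and enables the clocks in order and unwinds on failure,
 * the disable path reverses the operation for all of them.
 */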
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

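/*
 * Vote the proxy power domains to their maximum performance state and take
 * a runtime PM reference on each; the disable path drops the votes and
 * references again.
 */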
static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
			   size_t pd_count)
{
	int ret;
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(pds[i]);
			dev_pm_genpd_set_performance_state(pds[i], 0);
			goto unroll_pd_votes;
		}
	}

	return 0;

unroll_pd_votes:
	for (i--; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
}

static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
			     size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}
}

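/*
 * Reassign ownership of a physical memory range between Linux (HLOS) and
 * the modem (MSS MSA) via a secure-world call.  @local and @remote select
 * which sides should have access afterwards; the call is skipped when
 * memory protection is not needed or the current permissions already
 * match the request.
 */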
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool local, bool remote, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next[2];
	int perms = 0;

	if (!qproc->need_mem_protection)
		return 0;

	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
		return 0;

	if (local) {
		next[perms].vmid = QCOM_SCM_VMID_HLOS;
		next[perms].perm = QCOM_SCM_PERM_RWX;
		perms++;
	}

	if (remote) {
		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[perms].perm = QCOM_SCM_PERM_RW;
		perms++;
	}

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, next, perms);
}

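/*
 * Optionally append the "msadp" debug policy blob at the 1M offset of the
 * MBA region; it is silently skipped if the firmware file is absent or
 * does not fit.
 */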
static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
{
	const struct firmware *dp_fw;

	if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
		return;

	if (SZ_1M + dp_fw->size <= qproc->mba_size) {
		memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
		qproc->dp_size = dp_fw->size;
	}

	release_firmware(dp_fw);
}

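/*
 * rproc .load callback: copy the MBA (modem boot authenticator) firmware
 * into its carveout and stage the optional debug policy behind it.
 */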
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;
	void *mba_region;

	/* MBA is restricted to a maximum size of 1M */
	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
		dev_err(qproc->dev, "MBA firmware load failed\n");
		return -EINVAL;
	}

	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
	if (!mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&qproc->mba_phys, qproc->mba_size);
		return -EBUSY;
	}

	memcpy(mba_region, fw->data, fw->size);
	q6v5_debug_policy_load(qproc, mba_region);
	memunmap(mba_region);

	return 0;
}

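/*
 * Assert/deassert the MSS restart line.  Depending on the SoC this is a
 * plain reset, a PDC-assisted alternate reset, or a sequence that also
 * toggles the AXI gating override or the external RSCC control register.
 */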
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is reset while the Q6 modem is partly
		 * operational, the AXI valid signal can glitch, leading to
		 * spurious transactions and Q6 hangs.  Work around this by
		 * asserting the AXI_GATING_VALID_OVERRIDE bit before
		 * triggering the Q6/MSS reset, and withdrawing it after the
		 * MSS assert/deassert sequence, all while holding the PDC
		 * reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		reset_control_assert(qproc->pdc_reset);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}

static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

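/*
 * Poll the RMB status registers, with a millisecond timeout, for the PBL
 * and MBA handshake values written by the boot ROM and the modem boot
 * authenticator respectively.
 */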
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

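/*
 * When MBA logs are supported, reclaim the MBA region for Linux and copy
 * the first MBA_LOG_SIZE bytes into a devcoredump so the MBA boot log can
 * be inspected from user space.
 */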
static void q6v5_dump_mba_logs(struct q6v5 *qproc)
{
	struct rproc *rproc = qproc->rproc;
	void *data;
	void *mba_region;

	if (!qproc->has_mba_logs)
		return;

	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
				    qproc->mba_size))
		return;

	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
	if (!mba_region)
		return;

	data = vmalloc(MBA_LOG_SIZE);
	if (data) {
		memcpy(data, mba_region, MBA_LOG_SIZE);
		dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
	}
	memunmap(mba_region);
}

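/*
 * Bring the Hexagon core out of reset.  The power-up sequence differs per
 * QDSP6 generation: SDM845 and SC7180/SC7280 hand most of the work to a
 * boot FSM, MSM8996/MSM8998 sequence the memory power and clamp controls
 * by hand, and older parts use the legacy BHS sequence.  All variants end
 * by waiting for the PBL status handshake.
 */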
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

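/*
 * Q-channel handshake helpers used on parts with QACCEPT registers: the
 * enable path requests the bridge and waits for QACCEPT, the disable path
 * retries the takedown while honouring QDENY, falling back to mss_restart
 * to clear pending transactions if the takedown never completes.
 */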
static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	unsigned int val;
	int ret;

	if (!qproc->has_qaccept_regs)
		return 0;

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		regmap_write(qproc->conn_map, qproc->force_clk_on, 1);

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "failed to enable axim1 clock\n");
			return -ETIMEDOUT;
		}
	}

	regmap_write(map, offset + QACCEPT_REQ_REG, 1);

	/* Wait for accept */
	ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
				       QACCEPT_TIMEOUT_US);
	if (ret) {
		dev_err(qproc->dev, "qchannel enable failed\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	int ret;
	unsigned int val, retry;
	unsigned int nretry = 10;
	bool takedown_complete = false;

	if (!qproc->has_qaccept_regs)
		return;

	while (!takedown_complete && nretry) {
		nretry--;

		/* Wait for active transactions to complete */
		regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
					 QACCEPT_TIMEOUT_US);

		/* Request Q-channel transaction takedown */
		regmap_write(map, offset + QACCEPT_REQ_REG, 0);

		/*
		 * If the request is denied, reset the Q-channel takedown request,
		 * wait for active transactions to complete and retry takedown.
		 */
		retry = 10;
		while (retry) {
			usleep_range(5, 10);
			retry--;
			ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
			if (!ret && val) {
				regmap_write(map, offset + QACCEPT_REQ_REG, 1);
				break;
			}

			ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
			if (!ret && !val) {
				takedown_complete = true;
				break;
			}
		}

		if (!retry)
			break;
	}

	/* Rely on mss_restart to clear out pending transactions on takedown failure */
	if (!takedown_complete)
		dev_err(qproc->dev, "qchannel takedown failed\n");
}

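/*
 * Halt an AXI port ahead of reset: request the halt, wait for the ack and
 * for the port to report idle, then clear the request (the port stays
 * halted until the subsystem is reset).
 */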
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

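/*
 * Feed the MPSS image metadata (as returned by qcom_mdt_read_metadata())
 * to the MBA through a DMA bounce buffer: grant the modem access to the
 * buffer, point RMB_PMI_META_DATA_REG at it, and wait for the
 * authentication result before reclaiming the buffer.
 */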
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed, system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}

static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

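/*
 * Power up the subsystem and boot the MBA: enable proxy/active resources,
 * release the resets, hand the MBA (and optional debug policy) region to
 * the modem and wait for the MBA to report it is ready to authenticate
 * MPSS segments.  The error path unwinds in strict reverse order.
 */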
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	ret = qcom_q6v5_prepare(&qproc->q6v5);
	if (ret)
		return ret;

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
				    qproc->fallback_proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_fallback_proxy_reg;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	if (ret) {
		dev_err(qproc->dev, "failed to enable axi bridge\n");
		goto disable_active_clks;
	}

	/*
	 * Some versions of the MBA firmware will upon boot wipe the MPSS
	 * region as well, so provide the Q6 access to this region.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	mba_load_err = true;
reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_fallback_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

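/*
 * Undo q6v5_mba_load(): halt the AXI ports and Q-channels, reassert the
 * resets, drop the active resources and reclaim the MBA region for Linux.
 * If qcom_q6v5_unprepare() reports that the proxy votes are still held,
 * they are dropped here as well.
 */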
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * Assert the I/O, word line and QMC memory clamps to avoid
		 * high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 1);

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable axim1 clock\n");

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable crypto clock\n");
	}

	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/*
	 * In case of a failure or coredump scenario where reclaiming MBA
	 * memory could not happen earlier, reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
				       qproc->fallback_proxy_reg_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

static int q6v5_reload_mba(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
	if (ret < 0)
		return ret;

	q6v5_load(rproc, fw);
	ret = q6v5_mba_load(qproc);
	release_firmware(fw);

	return ret;
}

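/*
 * Load and authenticate the MPSS (modem) image proper: send the metadata
 * to the MBA, then copy each valid ELF segment into the MPSS carveout
 * (fetching split ".bNN" files when the image is not monolithic), updating
 * the RMB code-length register as segments land so the MBA can
 * authenticate the region incrementally.
 */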
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001254static int q6v5_mpss_load(struct q6v5 *qproc)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001255{
1256 const struct elf32_phdr *phdrs;
1257 const struct elf32_phdr *phdr;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001258 const struct firmware *seg_fw;
1259 const struct firmware *fw;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001260 struct elf32_hdr *ehdr;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001261 phys_addr_t mpss_reloc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001262 phys_addr_t boot_addr;
Stefan Agnerd7dc8992018-06-14 15:28:02 -07001263 phys_addr_t min_addr = PHYS_ADDR_MAX;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001264 phys_addr_t max_addr = 0;
Bjorn Andersson715d8522020-03-05 01:17:28 +05301265 u32 code_length;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001266 bool relocate = false;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301267 char *fw_name;
1268 size_t fw_name_len;
Bjorn Andersson01625cc52017-02-15 14:00:41 -08001269 ssize_t offset;
Avaneesh Kumar Dwivedi94c90782017-10-24 21:22:25 +05301270 size_t size = 0;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001271 void *ptr;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001272 int ret;
1273 int i;
1274
Sibi Sankara5a4e022019-01-15 01:20:01 +05301275 fw_name_len = strlen(qproc->hexagon_mdt_image);
1276 if (fw_name_len <= 4)
1277 return -EINVAL;
1278
1279 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1280 if (!fw_name)
1281 return -ENOMEM;
1282
1283 ret = request_firmware(&fw, fw_name, qproc->dev);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001284 if (ret < 0) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301285 dev_err(qproc->dev, "unable to load %s\n", fw_name);
1286 goto out;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001287 }
1288
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001289 /* Initialize the RMB validator */
1290 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1291
1292 ret = q6v5_mpss_init_image(qproc, fw);
1293 if (ret)
1294 goto release_firmware;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001295
1296 ehdr = (struct elf32_hdr *)fw->data;
1297 phdrs = (struct elf32_phdr *)(ehdr + 1);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001298
1299 for (i = 0; i < ehdr->e_phnum; i++) {
Bjorn Andersson051fb702016-06-20 14:28:41 -07001300 phdr = &phdrs[i];
1301
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001302 if (!q6v5_phdr_valid(phdr))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001303 continue;
1304
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001305 if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1306 relocate = true;
1307
1308 if (phdr->p_paddr < min_addr)
1309 min_addr = phdr->p_paddr;
1310
1311 if (phdr->p_paddr + phdr->p_memsz > max_addr)
1312 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1313 }
1314
Sibi Sankar4360f932020-09-17 23:28:40 +05301315 /*
Bjorn Andersson900fc602020-03-05 01:17:27 +05301316 * In case of a modem subsystem restart on secure devices, the modem
Sibi Sankar4360f932020-09-17 23:28:40 +05301317 * memory can be reclaimed only after MBA is loaded.
Bjorn Andersson900fc602020-03-05 01:17:27 +05301318 */
Bjorn Andersson715d8522020-03-05 01:17:28 +05301319 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
Bjorn Andersson900fc602020-03-05 01:17:27 +05301320 qproc->mpss_phys, qproc->mpss_size);
1321
Bjorn Andersson715d8522020-03-05 01:17:28 +05301322 /* Share ownership between Linux and MSS, during segment loading */
1323 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1324 qproc->mpss_phys, qproc->mpss_size);
1325 if (ret) {
1326 dev_err(qproc->dev,
1327 "assigning Q6 access to mpss memory failed: %d\n", ret);
1328 ret = -EAGAIN;
1329 goto release_firmware;
1330 }
1331
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001332 mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
Sibi Sankar3bf62eb2018-07-27 20:50:03 +05301333 qproc->mpss_reloc = mpss_reloc;
Avaneesh Kumar Dwivedi94c90782017-10-24 21:22:25 +05301334 /* Load firmware segments */
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001335 for (i = 0; i < ehdr->e_phnum; i++) {
1336 phdr = &phdrs[i];
1337
1338 if (!q6v5_phdr_valid(phdr))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001339 continue;
1340
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001341 offset = phdr->p_paddr - mpss_reloc;
1342 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1343 dev_err(qproc->dev, "segment outside memory range\n");
1344 ret = -EINVAL;
1345 goto release_firmware;
1346 }
1347
Bjorn Andersson3d2ee782021-03-12 15:20:02 -08001348 if (phdr->p_filesz > phdr->p_memsz) {
1349 dev_err(qproc->dev,
1350 "refusing to load segment %d with p_filesz > p_memsz\n",
1351 i);
1352 ret = -EINVAL;
1353 goto release_firmware;
1354 }
1355
Sibi Sankar04ff5d12020-11-04 12:33:41 +05301356 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
Sibi Sankarbe050a32020-04-15 12:46:18 +05301357 if (!ptr) {
1358 dev_err(qproc->dev,
1359 "unable to map memory region: %pa+%zx-%x\n",
1360 &qproc->mpss_phys, offset, phdr->p_memsz);
1361 goto release_firmware;
1362 }
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001363
Bjorn Anderssonf04b9132019-06-21 18:21:46 -07001364 if (phdr->p_filesz && phdr->p_offset < fw->size) {
1365 /* Firmware is large enough to be non-split */
1366 if (phdr->p_offset + phdr->p_filesz > fw->size) {
1367 dev_err(qproc->dev,
1368 "failed to load segment %d from truncated file %s\n",
1369 i, fw_name);
1370 ret = -EINVAL;
Sibi Sankar04ff5d12020-11-04 12:33:41 +05301371 memunmap(ptr);
Bjorn Anderssonf04b9132019-06-21 18:21:46 -07001372 goto release_firmware;
1373 }
1374
1375 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1376 } else if (phdr->p_filesz) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301377 /* Replace "xxx.xxx" with "xxx.bxx" */
1378 sprintf(fw_name + fw_name_len - 3, "b%02d", i);
Sibi Sankar135b9e82020-07-23 01:40:46 +05301379 ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
1380 ptr, phdr->p_filesz);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001381 if (ret) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301382 dev_err(qproc->dev, "failed to load %s\n", fw_name);
Sibi Sankar04ff5d12020-11-04 12:33:41 +05301383 memunmap(ptr);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001384 goto release_firmware;
1385 }
1386
Bjorn Andersson3d2ee782021-03-12 15:20:02 -08001387 if (seg_fw->size != phdr->p_filesz) {
1388 dev_err(qproc->dev,
1389 "failed to load segment %d from truncated file %s\n",
1390 i, fw_name);
1391 ret = -EINVAL;
1392 release_firmware(seg_fw);
1393 memunmap(ptr);
1394 goto release_firmware;
1395 }
1396
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001397 release_firmware(seg_fw);
1398 }
1399
1400 if (phdr->p_memsz > phdr->p_filesz) {
1401 memset(ptr + phdr->p_filesz, 0,
1402 phdr->p_memsz - phdr->p_filesz);
1403 }
Sibi Sankar04ff5d12020-11-04 12:33:41 +05301404 memunmap(ptr);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001405 size += phdr->p_memsz;
Bjorn Andersson715d8522020-03-05 01:17:28 +05301406
1407 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1408 if (!code_length) {
1409 boot_addr = relocate ? qproc->mpss_phys : min_addr;
1410 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1411 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1412 }
1413 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1414
1415 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1416 if (ret < 0) {
1417 dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1418 ret);
1419 goto release_firmware;
1420 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001421 }
1422
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301423 /* Transfer ownership of the modem DDR region to Q6 */
Bjorn Andersson715d8522020-03-05 01:17:28 +05301424 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301425 qproc->mpss_phys, qproc->mpss_size);
Bjorn Andersson9f2a4342017-11-06 22:26:41 -08001426 if (ret) {
1427 dev_err(qproc->dev,
1428 "assigning Q6 access to mpss memory failed: %d\n", ret);
Christophe JAILLET1a5d5c52017-11-15 07:58:35 +01001429 ret = -EAGAIN;
1430 goto release_firmware;
Bjorn Andersson9f2a4342017-11-06 22:26:41 -08001431 }
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301432
Bjorn Andersson72beb492016-07-12 17:15:45 -07001433 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1434 if (ret == -ETIMEDOUT)
1435 dev_err(qproc->dev, "MPSS authentication timed out\n");
1436 else if (ret < 0)
1437 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1438
Bjorn Anderssond4c78d22020-06-22 12:19:40 -07001439 qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
1440
Bjorn Andersson051fb702016-06-20 14:28:41 -07001441release_firmware:
1442 release_firmware(fw);
Sibi Sankara5a4e022019-01-15 01:20:01 +05301443out:
1444 kfree(fw_name);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001445
1446 return ret < 0 ? ret : 0;
1447}
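
/*
 * Illustrative note, not a normative description: with
 * firmware-name = "mba.mbn", "modem.mdt", q6v5_mpss_load() parses
 * "modem.mdt" and, for every loadable program header whose payload is not
 * already contained in the .mdt file, derives the split image name by
 * rewriting the last three characters of the mdt name:
 *
 *	modem.mdt  ->  modem.b00, modem.b01, ..., modem.b<NN>
 *
 * Each split segment is streamed directly into the MPSS region with
 * request_firmware_into_buf() and authenticated incrementally by the MBA
 * via RMB_PMI_CODE_START_REG/RMB_PMI_CODE_LENGTH_REG and
 * RMB_CMD_LOAD_READY.
 */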
1448
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301449static void qcom_q6v5_dump_segment(struct rproc *rproc,
1450 struct rproc_dump_segment *segment,
Rishabh Bhatnagar76abf9c2020-07-16 15:20:33 -07001451 void *dest, size_t cp_offset, size_t size)
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301452{
1453 int ret = 0;
1454 struct q6v5 *qproc = rproc->priv;
Sibi Sankarbe050a32020-04-15 12:46:18 +05301455 int offset = segment->da - qproc->mpss_reloc;
1456 void *ptr = NULL;
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301457
1458 /* Unlock mba before copying segments */
Bjorn Andersson900fc602020-03-05 01:17:27 +05301459 if (!qproc->dump_mba_loaded) {
Sibi Sankard96f2572020-03-05 01:17:29 +05301460 ret = q6v5_reload_mba(rproc);
Bjorn Andersson900fc602020-03-05 01:17:27 +05301461 if (!ret) {
1462 /* Reset ownership back to Linux to copy segments */
1463 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
Bjorn Andersson715d8522020-03-05 01:17:28 +05301464 true, false,
Bjorn Andersson900fc602020-03-05 01:17:27 +05301465 qproc->mpss_phys,
1466 qproc->mpss_size);
1467 }
1468 }
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301469
Sibi Sankarbe050a32020-04-15 12:46:18 +05301470 if (!ret)
Sibi Sankar04ff5d12020-11-04 12:33:41 +05301471 ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);
Sibi Sankarbe050a32020-04-15 12:46:18 +05301472
1473 if (ptr) {
Rishabh Bhatnagar76abf9c2020-07-16 15:20:33 -07001474 memcpy(dest, ptr, size);
Sibi Sankar04ff5d12020-11-04 12:33:41 +05301475 memunmap(ptr);
Sibi Sankarbe050a32020-04-15 12:46:18 +05301476 } else {
Rishabh Bhatnagar76abf9c2020-07-16 15:20:33 -07001477 memset(dest, 0xff, size);
Sibi Sankarbe050a32020-04-15 12:46:18 +05301478 }
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301479
Rishabh Bhatnagar76abf9c2020-07-16 15:20:33 -07001480 qproc->current_dump_size += size;
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301481
1482 /* Reclaim mba after copying segments */
Sibi Sankar7ac516d2020-07-16 15:20:32 -07001483 if (qproc->current_dump_size == qproc->total_dump_size) {
Bjorn Andersson900fc602020-03-05 01:17:27 +05301484 if (qproc->dump_mba_loaded) {
1485 /* Try to reset ownership back to Q6 */
1486 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
Bjorn Andersson715d8522020-03-05 01:17:28 +05301487 false, true,
Bjorn Andersson900fc602020-03-05 01:17:27 +05301488 qproc->mpss_phys,
1489 qproc->mpss_size);
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301490 q6v5_mba_reclaim(qproc);
Bjorn Andersson900fc602020-03-05 01:17:27 +05301491 }
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301492 }
1493}
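
/*
 * Coredump flow, as a sketch: the remoteproc core invokes this callback
 * once per segment registered by qcom_q6v5_register_dump_segments(). If
 * the MBA is not loaded when the first segment is dumped, it is reloaded
 * and the MPSS region is pulled back to Linux so it can be read; once
 * current_dump_size reaches total_dump_size, ownership is handed back to
 * Q6 and the MBA is reclaimed.
 */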
1494
Bjorn Andersson051fb702016-06-20 14:28:41 -07001495static int q6v5_start(struct rproc *rproc)
1496{
1497 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301498 int xfermemop_ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001499 int ret;
1500
Sibi Sankar03045302018-10-17 19:25:25 +05301501 ret = q6v5_mba_load(qproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001502 if (ret)
Sibi Sankar03045302018-10-17 19:25:25 +05301503 return ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001504
Sibi Sankarfe6a5dc2020-07-23 01:40:47 +05301505 dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
1506 qproc->dp_size ? "" : "out");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001507
1508 ret = q6v5_mpss_load(qproc);
1509 if (ret)
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301510 goto reclaim_mpss;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001511
Bjorn Andersson7d674732018-06-04 13:30:38 -07001512 ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1513 if (ret == -ETIMEDOUT) {
Bjorn Andersson051fb702016-06-20 14:28:41 -07001514 dev_err(qproc->dev, "start timed out\n");
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301515 goto reclaim_mpss;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001516 }
1517
Bjorn Andersson715d8522020-03-05 01:17:28 +05301518 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
1519 false, qproc->mba_phys,
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301520 qproc->mba_size);
1521 if (xfermemop_ret)
1522 dev_err(qproc->dev,
1523 "Failed to reclaim mba buffer system may become unstable\n");
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301524
 1525 /* Reset the accumulated coredump size */
Sibi Sankar7ac516d2020-07-16 15:20:32 -07001526 qproc->current_dump_size = 0;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001527
Bjorn Andersson051fb702016-06-20 14:28:41 -07001528 return 0;
1529
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301530reclaim_mpss:
Sibi Sankar03045302018-10-17 19:25:25 +05301531 q6v5_mba_reclaim(qproc);
Sibi Sankar318130c2020-07-21 16:59:35 +05301532 q6v5_dump_mba_logs(qproc);
Sibi Sankar663e9842018-05-21 22:57:09 +05301533
Bjorn Andersson051fb702016-06-20 14:28:41 -07001534 return ret;
1535}
1536
1537static int q6v5_stop(struct rproc *rproc)
1538{
1539 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1540 int ret;
1541
Bjorn Anderssoned5da802020-11-21 21:41:34 -08001542 ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
Bjorn Andersson7d674732018-06-04 13:30:38 -07001543 if (ret == -ETIMEDOUT)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001544 dev_err(qproc->dev, "timed out waiting for modem stop acknowledgment\n");
1545
Sibi Sankar03045302018-10-17 19:25:25 +05301546 q6v5_mba_reclaim(qproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001547
1548 return 0;
1549}
1550
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301551static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1552 const struct firmware *mba_fw)
1553{
1554 const struct firmware *fw;
1555 const struct elf32_phdr *phdrs;
1556 const struct elf32_phdr *phdr;
1557 const struct elf32_hdr *ehdr;
1558 struct q6v5 *qproc = rproc->priv;
1559 unsigned long i;
1560 int ret;
1561
Sibi Sankara5a4e022019-01-15 01:20:01 +05301562 ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301563 if (ret < 0) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301564 dev_err(qproc->dev, "unable to load %s\n",
1565 qproc->hexagon_mdt_image);
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301566 return ret;
1567 }
1568
Clement Leger3898fc92020-04-10 12:24:33 +02001569 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1570
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301571 ehdr = (struct elf32_hdr *)fw->data;
1572 phdrs = (struct elf32_phdr *)(ehdr + 1);
Sibi Sankar7ac516d2020-07-16 15:20:32 -07001573 qproc->total_dump_size = 0;
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301574
1575 for (i = 0; i < ehdr->e_phnum; i++) {
1576 phdr = &phdrs[i];
1577
1578 if (!q6v5_phdr_valid(phdr))
1579 continue;
1580
1581 ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1582 phdr->p_memsz,
1583 qcom_q6v5_dump_segment,
Sibi Sankar7ac516d2020-07-16 15:20:32 -07001584 NULL);
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301585 if (ret)
1586 break;
1587
Sibi Sankar7ac516d2020-07-16 15:20:32 -07001588 qproc->total_dump_size += phdr->p_memsz;
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301589 }
1590
1591 release_firmware(fw);
1592 return ret;
1593}
1594
Bjorn Andersson051fb702016-06-20 14:28:41 -07001595static const struct rproc_ops q6v5_ops = {
1596 .start = q6v5_start,
1597 .stop = q6v5_stop,
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301598 .parse_fw = qcom_q6v5_register_dump_segments,
Bjorn Andersson0f21f9c2018-01-05 15:58:01 -08001599 .load = q6v5_load,
Bjorn Andersson051fb702016-06-20 14:28:41 -07001600};
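
/*
 * Rough, simplified sketch of how the remoteproc core drives these ops on
 * boot (error handling omitted):
 *
 *	rproc_boot()
 *	  -> request_firmware(rproc->firmware)	// the MBA image
 *	  -> .parse_fw()	// register coredump segments from modem.mdt
 *	  -> .load()		// stage the MBA image in the mba region
 *	  -> .start()		// boot the MBA, then load and authenticate MPSS
 */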
1601
Bjorn Andersson7d674732018-06-04 13:30:38 -07001602static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001603{
Bjorn Andersson7d674732018-06-04 13:30:38 -07001604 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
Sibi Sankar663e9842018-05-21 22:57:09 +05301605
1606 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1607 qproc->proxy_clk_count);
1608 q6v5_regulator_disable(qproc, qproc->proxy_regs,
1609 qproc->proxy_reg_count);
Stephan Gerhold8750cf32020-09-16 12:41:31 +02001610 q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
1611 qproc->fallback_proxy_reg_count);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001612 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001613}
1614
1615static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1616{
1617 struct of_phandle_args args;
Sibi Sankarc8423792021-09-17 19:25:30 +05301618 int halt_cell_cnt = 3;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001619 int ret;
1620
zhaoxiao9db9c732021-09-06 15:11:47 +08001621 qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
Wei Yongjunb1653f22016-07-14 12:57:44 +00001622 if (IS_ERR(qproc->reg_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001623 return PTR_ERR(qproc->reg_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001624
zhaoxiao9db9c732021-09-06 15:11:47 +08001625 qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
Wei Yongjunb1653f22016-07-14 12:57:44 +00001626 if (IS_ERR(qproc->rmb_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001627 return PTR_ERR(qproc->rmb_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001628
Sibi Sankarc8423792021-09-17 19:25:30 +05301629 if (qproc->has_vq6)
1630 halt_cell_cnt++;
1631
Bjorn Andersson051fb702016-06-20 14:28:41 -07001632 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
Sibi Sankarc8423792021-09-17 19:25:30 +05301633 "qcom,halt-regs", halt_cell_cnt, 0, &args);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001634 if (ret < 0) {
1635 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1636 return -EINVAL;
1637 }
1638
1639 qproc->halt_map = syscon_node_to_regmap(args.np);
1640 of_node_put(args.np);
1641 if (IS_ERR(qproc->halt_map))
1642 return PTR_ERR(qproc->halt_map);
1643
1644 qproc->halt_q6 = args.args[0];
1645 qproc->halt_modem = args.args[1];
1646 qproc->halt_nc = args.args[2];
1647
Sibi Sankarc8423792021-09-17 19:25:30 +05301648 if (qproc->has_vq6)
1649 qproc->halt_vq6 = args.args[3];
1650
1651 if (qproc->has_qaccept_regs) {
1652 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1653 "qcom,qaccept-regs",
1654 3, 0, &args);
1655 if (ret < 0) {
1656 dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
1657 return -EINVAL;
1658 }
1659
1660 qproc->qaccept_mdm = args.args[0];
1661 qproc->qaccept_cx = args.args[1];
1662 qproc->qaccept_axi = args.args[2];
1663 }
1664
1665 if (qproc->has_ext_cntl_regs) {
1666 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1667 "qcom,ext-regs",
1668 2, 0, &args);
1669 if (ret < 0) {
1670 dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
1671 return -EINVAL;
1672 }
1673
1674 qproc->conn_map = syscon_node_to_regmap(args.np);
1675 of_node_put(args.np);
1676 if (IS_ERR(qproc->conn_map))
1677 return PTR_ERR(qproc->conn_map);
1678
1679 qproc->force_clk_on = args.args[0];
1680 qproc->rscc_disable = args.args[1];
1681
1682 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1683 "qcom,ext-regs",
1684 2, 1, &args);
1685 if (ret < 0) {
1686 dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
1687 return -EINVAL;
1688 }
1689
1690 qproc->axim1_clk_off = args.args[0];
1691 qproc->crypto_clk_off = args.args[1];
1692 }
1693
Sibi Sankara9fdc792020-04-15 20:21:10 +05301694 if (qproc->has_spare_reg) {
Sibi Sankar6439b522019-12-19 11:15:06 +05301695 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301696 "qcom,spare-regs",
Sibi Sankar6439b522019-12-19 11:15:06 +05301697 1, 0, &args);
1698 if (ret < 0) {
Sibi Sankara9fdc792020-04-15 20:21:10 +05301699 dev_err(&pdev->dev, "failed to parse spare-regs\n");
Sibi Sankar6439b522019-12-19 11:15:06 +05301700 return -EINVAL;
1701 }
1702
1703 qproc->conn_map = syscon_node_to_regmap(args.np);
1704 of_node_put(args.np);
1705 if (IS_ERR(qproc->conn_map))
1706 return PTR_ERR(qproc->conn_map);
1707
1708 qproc->conn_box = args.args[0];
1709 }
1710
Bjorn Andersson051fb702016-06-20 14:28:41 -07001711 return 0;
1712}
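
/*
 * Illustrative device tree fragment for the properties parsed above; the
 * syscon phandles and offsets are placeholders, not values from a real
 * board file:
 *
 *	qcom,halt-regs = <&tcsr_regs 0x180 0x200 0x280>;   // q6, modem, nc
 *	(a fourth cell carries the vq6 halt offset when has_vq6 is set)
 *	qcom,spare-regs = <&tcsr_regs 0xb3e4>;              // has_spare_reg
 *	qcom,qaccept-regs = <&tcsr_regs 0x30 0x34 0x38>;    // has_qaccept_regs
 *	qcom,ext-regs = <&tcsr_a 0x10 0x14>, <&tcsr_b 0x20 0x24>; // has_ext_cntl_regs
 */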
1713
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301714static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1715 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001716{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301717 int i;
1718
1719 if (!clk_names)
1720 return 0;
1721
1722 for (i = 0; clk_names[i]; i++) {
1723 clks[i] = devm_clk_get(dev, clk_names[i]);
1724 if (IS_ERR(clks[i])) {
1725 int rc = PTR_ERR(clks[i]);
1726
1727 if (rc != -EPROBE_DEFER)
1728 dev_err(dev, "Failed to get %s clock\n",
1729 clk_names[i]);
1730 return rc;
1731 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001732 }
1733
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301734 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001735}
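
/*
 * Usage sketch (illustrative): the name array is NULL terminated and the
 * return value is the number of clocks acquired, which the caller stores
 * as the matching *_clk_count:
 *
 *	static char *names[] = { "xo", "iface", NULL };
 *	struct clk *clks[2];
 *	int n = q6v5_init_clocks(dev, clks, names);	// 2 on success, -errno on error
 */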
1736
Rajendra Nayak4760a892019-01-30 16:39:30 -08001737static int q6v5_pds_attach(struct device *dev, struct device **devs,
1738 char **pd_names)
1739{
1740 size_t num_pds = 0;
1741 int ret;
1742 int i;
1743
1744 if (!pd_names)
1745 return 0;
1746
1747 while (pd_names[num_pds])
1748 num_pds++;
1749
1750 for (i = 0; i < num_pds; i++) {
1751 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
Sibi Sankarf2583fd2019-08-21 23:35:48 +05301752 if (IS_ERR_OR_NULL(devs[i])) {
1753 ret = PTR_ERR(devs[i]) ? : -ENODATA;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001754 goto unroll_attach;
1755 }
1756 }
1757
1758 return num_pds;
1759
1760unroll_attach:
1761 for (i--; i >= 0; i--)
1762 dev_pm_domain_detach(devs[i], false);
1763
1764 return ret;
Alex Elder58396812020-04-03 12:50:05 -05001765}
Rajendra Nayak4760a892019-01-30 16:39:30 -08001766
1767static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1768 size_t pd_count)
1769{
1770 int i;
1771
1772 for (i = 0; i < pd_count; i++)
1773 dev_pm_domain_detach(pds[i], false);
1774}
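
/*
 * Attach and detach are paired (sketch):
 *
 *	count = q6v5_pds_attach(dev, qproc->proxy_pds, desc->proxy_pd_names);
 *	...
 *	q6v5_pds_detach(qproc, qproc->proxy_pds, count);
 *
 * A NULL return from dev_pm_domain_attach_by_name() is mapped to -ENODATA,
 * which q6v5_probe() uses to fall back to the legacy proxy regulators on
 * old device trees.
 */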
1775
Bjorn Andersson051fb702016-06-20 14:28:41 -07001776static int q6v5_init_reset(struct q6v5 *qproc)
1777{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001778 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
Sibi Sankar9e483ef2018-08-30 00:42:14 +05301779 "mss_restart");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001780 if (IS_ERR(qproc->mss_restart)) {
1781 dev_err(qproc->dev, "failed to acquire mss restart\n");
1782 return PTR_ERR(qproc->mss_restart);
1783 }
1784
Sibi Sankarc8423792021-09-17 19:25:30 +05301785 if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301786 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1787 "pdc_reset");
1788 if (IS_ERR(qproc->pdc_reset)) {
1789 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1790 return PTR_ERR(qproc->pdc_reset);
1791 }
1792 }
1793
Bjorn Andersson051fb702016-06-20 14:28:41 -07001794 return 0;
1795}
1796
Bjorn Andersson051fb702016-06-20 14:28:41 -07001797static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1798{
1799 struct device_node *child;
1800 struct device_node *node;
1801 struct resource r;
1802 int ret;
1803
Sibi Sankar6663ce62020-04-21 20:02:25 +05301804 /*
 1805 * In the absence of mba/mpss sub-child nodes, extract the mba and mpss
 1806 * reserved memory regions from the device's memory-region property.
1807 */
Bjorn Andersson051fb702016-06-20 14:28:41 -07001808 child = of_get_child_by_name(qproc->dev->of_node, "mba");
Sibi Sankar6663ce62020-04-21 20:02:25 +05301809 if (!child)
1810 node = of_parse_phandle(qproc->dev->of_node,
1811 "memory-region", 0);
1812 else
1813 node = of_parse_phandle(child, "memory-region", 0);
1814
Bjorn Andersson051fb702016-06-20 14:28:41 -07001815 ret = of_address_to_resource(node, 0, &r);
1816 if (ret) {
1817 dev_err(qproc->dev, "unable to resolve mba region\n");
1818 return ret;
1819 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001820 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001821
1822 qproc->mba_phys = r.start;
1823 qproc->mba_size = resource_size(&r);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001824
Sibi Sankar6663ce62020-04-21 20:02:25 +05301825 if (!child) {
1826 node = of_parse_phandle(qproc->dev->of_node,
1827 "memory-region", 1);
1828 } else {
1829 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1830 node = of_parse_phandle(child, "memory-region", 0);
1831 }
1832
Bjorn Andersson051fb702016-06-20 14:28:41 -07001833 ret = of_address_to_resource(node, 0, &r);
1834 if (ret) {
1835 dev_err(qproc->dev, "unable to resolve mpss region\n");
1836 return ret;
1837 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001838 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001839
1840 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1841 qproc->mpss_size = resource_size(&r);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001842
1843 return 0;
1844}
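
/*
 * Illustrative device tree usage (phandle names are placeholders): either
 * the flat form
 *
 *	memory-region = <&mba_mem>, <&mpss_mem>;
 *
 * or the legacy mba/mpss sub-nodes, each with a single memory-region
 * entry, resolve to the physically contiguous carveouts stored in
 * mba_phys/mba_size and mpss_phys/mpss_size.
 */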
1845
1846static int q6v5_probe(struct platform_device *pdev)
1847{
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301848 const struct rproc_hexagon_res *desc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001849 struct q6v5 *qproc;
1850 struct rproc *rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301851 const char *mba_image;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001852 int ret;
1853
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301854 desc = of_device_get_match_data(&pdev->dev);
1855 if (!desc)
1856 return -EINVAL;
1857
Brian Norrisbbcda302018-10-08 19:08:05 -07001858 if (desc->need_mem_protection && !qcom_scm_is_available())
1859 return -EPROBE_DEFER;
1860
Sibi Sankara5a4e022019-01-15 01:20:01 +05301861 mba_image = desc->hexagon_mba_image;
1862 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1863 0, &mba_image);
Bjorn Andersson9af2a2a2021-03-11 16:26:05 -08001864 if (ret < 0 && ret != -EINVAL) {
1865 dev_err(&pdev->dev, "unable to read mba firmware-name\n");
Sibi Sankara5a4e022019-01-15 01:20:01 +05301866 return ret;
Bjorn Andersson9af2a2a2021-03-11 16:26:05 -08001867 }
Sibi Sankara5a4e022019-01-15 01:20:01 +05301868
Bjorn Andersson051fb702016-06-20 14:28:41 -07001869 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
Sibi Sankara5a4e022019-01-15 01:20:01 +05301870 mba_image, sizeof(*qproc));
Bjorn Andersson051fb702016-06-20 14:28:41 -07001871 if (!rproc) {
1872 dev_err(&pdev->dev, "failed to allocate rproc\n");
1873 return -ENOMEM;
1874 }
1875
Ramon Fried41071022018-05-24 22:21:41 +03001876 rproc->auto_boot = false;
Clement Leger3898fc92020-04-10 12:24:33 +02001877 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
Ramon Fried41071022018-05-24 22:21:41 +03001878
Bjorn Andersson051fb702016-06-20 14:28:41 -07001879 qproc = (struct q6v5 *)rproc->priv;
1880 qproc->dev = &pdev->dev;
1881 qproc->rproc = rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301882 qproc->hexagon_mdt_image = "modem.mdt";
1883 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1884 1, &qproc->hexagon_mdt_image);
Bjorn Andersson9af2a2a2021-03-11 16:26:05 -08001885 if (ret < 0 && ret != -EINVAL) {
1886 dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
Alex Elder13c060b2020-04-03 12:50:04 -05001887 goto free_rproc;
Bjorn Andersson9af2a2a2021-03-11 16:26:05 -08001888 }
Sibi Sankara5a4e022019-01-15 01:20:01 +05301889
Bjorn Andersson051fb702016-06-20 14:28:41 -07001890 platform_set_drvdata(pdev, qproc);
1891
Sibi Sankarc8423792021-09-17 19:25:30 +05301892 qproc->has_qaccept_regs = desc->has_qaccept_regs;
1893 qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
1894 qproc->has_vq6 = desc->has_vq6;
Sibi Sankara9fdc792020-04-15 20:21:10 +05301895 qproc->has_spare_reg = desc->has_spare_reg;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001896 ret = q6v5_init_mem(qproc, pdev);
1897 if (ret)
1898 goto free_rproc;
1899
1900 ret = q6v5_alloc_memory_region(qproc);
1901 if (ret)
1902 goto free_rproc;
1903
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301904 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1905 desc->proxy_clk_names);
1906 if (ret < 0) {
1907 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001908 goto free_rproc;
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301909 }
1910 qproc->proxy_clk_count = ret;
1911
Sibi Sankar231f67d2018-05-21 22:57:13 +05301912 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1913 desc->reset_clk_names);
1914 if (ret < 0) {
1915 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1916 goto free_rproc;
1917 }
1918 qproc->reset_clk_count = ret;
1919
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301920 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1921 desc->active_clk_names);
1922 if (ret < 0) {
1923 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1924 goto free_rproc;
1925 }
1926 qproc->active_clk_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001927
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301928 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1929 desc->proxy_supply);
1930 if (ret < 0) {
1931 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001932 goto free_rproc;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301933 }
1934 qproc->proxy_reg_count = ret;
1935
1936 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1937 desc->active_supply);
1938 if (ret < 0) {
1939 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1940 goto free_rproc;
1941 }
1942 qproc->active_reg_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001943
Rajendra Nayak4760a892019-01-30 16:39:30 -08001944 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1945 desc->proxy_pd_names);
Stephan Gerhold8750cf32020-09-16 12:41:31 +02001946 /* Fallback to regulators for old device trees */
1947 if (ret == -ENODATA && desc->fallback_proxy_supply) {
1948 ret = q6v5_regulator_init(&pdev->dev,
1949 qproc->fallback_proxy_regs,
1950 desc->fallback_proxy_supply);
1951 if (ret < 0) {
1952 dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
Sibi Sankarc1fe10d2021-09-16 19:29:21 +05301953 goto free_rproc;
Stephan Gerhold8750cf32020-09-16 12:41:31 +02001954 }
1955 qproc->fallback_proxy_reg_count = ret;
1956 } else if (ret < 0) {
Rajendra Nayak4760a892019-01-30 16:39:30 -08001957 dev_err(&pdev->dev, "Failed to init power domains\n");
Sibi Sankarc1fe10d2021-09-16 19:29:21 +05301958 goto free_rproc;
Stephan Gerhold8750cf32020-09-16 12:41:31 +02001959 } else {
1960 qproc->proxy_pd_count = ret;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001961 }
Rajendra Nayak4760a892019-01-30 16:39:30 -08001962
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301963 qproc->has_alt_reset = desc->has_alt_reset;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001964 ret = q6v5_init_reset(qproc);
1965 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001966 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001967
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301968 qproc->version = desc->version;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301969 qproc->need_mem_protection = desc->need_mem_protection;
Sibi Sankar318130c2020-07-21 16:59:35 +05301970 qproc->has_mba_logs = desc->has_mba_logs;
Bjorn Andersson7d674732018-06-04 13:30:38 -07001971
Sibi Sankarc1fe10d2021-09-16 19:29:21 +05301972 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
Bjorn Andersson7d674732018-06-04 13:30:38 -07001973 qcom_msa_handover);
1974 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001975 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001976
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301977 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1978 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
Bjorn Anderssoncd9fc8f2020-04-22 17:37:33 -07001979 qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
Bjorn Andersson4b489212017-01-29 14:05:50 -08001980 qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
Bjorn Andersson1e140df2017-07-24 22:56:43 -07001981 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07001982 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
Sibi Sankar027045a2019-01-08 15:53:43 +05301983 if (IS_ERR(qproc->sysmon)) {
1984 ret = PTR_ERR(qproc->sysmon);
Alex Elder58396812020-04-03 12:50:05 -05001985 goto remove_subdevs;
Sibi Sankar027045a2019-01-08 15:53:43 +05301986 }
Bjorn Andersson4b489212017-01-29 14:05:50 -08001987
Bjorn Andersson051fb702016-06-20 14:28:41 -07001988 ret = rproc_add(rproc);
1989 if (ret)
Alex Elder58396812020-04-03 12:50:05 -05001990 goto remove_sysmon_subdev;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001991
1992 return 0;
1993
Alex Elder58396812020-04-03 12:50:05 -05001994remove_sysmon_subdev:
1995 qcom_remove_sysmon_subdev(qproc->sysmon);
1996remove_subdevs:
Alex Elder58396812020-04-03 12:50:05 -05001997 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
1998 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
1999 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
2000detach_proxy_pds:
Rajendra Nayak4760a892019-01-30 16:39:30 -08002001 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07002002free_rproc:
Bjorn Andersson433c0e02016-10-02 17:46:38 -07002003 rproc_free(rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07002004
2005 return ret;
2006}
2007
2008static int q6v5_remove(struct platform_device *pdev)
2009{
2010 struct q6v5 *qproc = platform_get_drvdata(pdev);
Alex Elder58396812020-04-03 12:50:05 -05002011 struct rproc *rproc = qproc->rproc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07002012
Alex Elder58396812020-04-03 12:50:05 -05002013 rproc_del(rproc);
Bjorn Andersson4b489212017-01-29 14:05:50 -08002014
Sibi Sankarc1fe10d2021-09-16 19:29:21 +05302015 qcom_q6v5_deinit(&qproc->q6v5);
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07002016 qcom_remove_sysmon_subdev(qproc->sysmon);
Alex Elder58396812020-04-03 12:50:05 -05002017 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
2018 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
2019 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
Rajendra Nayak4760a892019-01-30 16:39:30 -08002020
2021 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
2022
Alex Elder58396812020-04-03 12:50:05 -05002023 rproc_free(rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07002024
2025 return 0;
2026}
2027
Sibi Sankar6439b522019-12-19 11:15:06 +05302028static const struct rproc_hexagon_res sc7180_mss = {
2029 .hexagon_mba_image = "mba.mbn",
2030 .proxy_clk_names = (char*[]){
2031 "xo",
2032 NULL
2033 },
2034 .reset_clk_names = (char*[]){
2035 "iface",
2036 "bus",
2037 "snoc_axi",
2038 NULL
2039 },
2040 .active_clk_names = (char*[]){
2041 "mnoc_axi",
2042 "nav",
Sibi Sankar6439b522019-12-19 11:15:06 +05302043 NULL
2044 },
Sibi Sankar6439b522019-12-19 11:15:06 +05302045 .proxy_pd_names = (char*[]){
2046 "cx",
2047 "mx",
2048 "mss",
2049 NULL
2050 },
2051 .need_mem_protection = true,
2052 .has_alt_reset = false,
Sibi Sankar318130c2020-07-21 16:59:35 +05302053 .has_mba_logs = true,
Sibi Sankara9fdc792020-04-15 20:21:10 +05302054 .has_spare_reg = true,
Sibi Sankarc8423792021-09-17 19:25:30 +05302055 .has_qaccept_regs = false,
2056 .has_ext_cntl_regs = false,
2057 .has_vq6 = false,
Sibi Sankar6439b522019-12-19 11:15:06 +05302058 .version = MSS_SC7180,
2059};
2060
Sibi Sankarc8423792021-09-17 19:25:30 +05302061static const struct rproc_hexagon_res sc7280_mss = {
2062 .hexagon_mba_image = "mba.mbn",
2063 .proxy_clk_names = (char*[]){
2064 "xo",
2065 "pka",
2066 NULL
2067 },
2068 .active_clk_names = (char*[]){
2069 "iface",
2070 "offline",
2071 "snoc_axi",
2072 NULL
2073 },
2074 .proxy_pd_names = (char*[]){
2075 "cx",
2076 "mss",
2077 NULL
2078 },
2079 .need_mem_protection = true,
2080 .has_alt_reset = false,
2081 .has_mba_logs = true,
2082 .has_spare_reg = false,
2083 .has_qaccept_regs = true,
2084 .has_ext_cntl_regs = true,
2085 .has_vq6 = true,
2086 .version = MSS_SC7280,
2087};
2088
Sibi Sankar231f67d2018-05-21 22:57:13 +05302089static const struct rproc_hexagon_res sdm845_mss = {
2090 .hexagon_mba_image = "mba.mbn",
2091 .proxy_clk_names = (char*[]){
2092 "xo",
Sibi Sankar231f67d2018-05-21 22:57:13 +05302093 "prng",
2094 NULL
2095 },
2096 .reset_clk_names = (char*[]){
2097 "iface",
2098 "snoc_axi",
2099 NULL
2100 },
2101 .active_clk_names = (char*[]){
2102 "bus",
2103 "mem",
2104 "gpll0_mss",
2105 "mnoc_axi",
2106 NULL
2107 },
Rajendra Nayak4760a892019-01-30 16:39:30 -08002108 .proxy_pd_names = (char*[]){
2109 "cx",
2110 "mx",
2111 "mss",
2112 NULL
2113 },
Sibi Sankar231f67d2018-05-21 22:57:13 +05302114 .need_mem_protection = true,
2115 .has_alt_reset = true,
Sibi Sankar318130c2020-07-21 16:59:35 +05302116 .has_mba_logs = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05302117 .has_spare_reg = false,
Sibi Sankarc8423792021-09-17 19:25:30 +05302118 .has_qaccept_regs = false,
2119 .has_ext_cntl_regs = false,
2120 .has_vq6 = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05302121 .version = MSS_SDM845,
2122};
2123
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07002124static const struct rproc_hexagon_res msm8998_mss = {
2125 .hexagon_mba_image = "mba.mbn",
2126 .proxy_clk_names = (char*[]){
2127 "xo",
2128 "qdss",
2129 "mem",
2130 NULL
2131 },
2132 .active_clk_names = (char*[]){
2133 "iface",
2134 "bus",
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07002135 "gpll0_mss",
2136 "mnoc_axi",
2137 "snoc_axi",
2138 NULL
2139 },
2140 .proxy_pd_names = (char*[]){
2141 "cx",
2142 "mx",
2143 NULL
2144 },
2145 .need_mem_protection = true,
2146 .has_alt_reset = false,
Sibi Sankar318130c2020-07-21 16:59:35 +05302147 .has_mba_logs = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05302148 .has_spare_reg = false,
Sibi Sankarc8423792021-09-17 19:25:30 +05302149 .has_qaccept_regs = false,
2150 .has_ext_cntl_regs = false,
2151 .has_vq6 = false,
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07002152 .version = MSS_MSM8998,
2153};
2154
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302155static const struct rproc_hexagon_res msm8996_mss = {
2156 .hexagon_mba_image = "mba.mbn",
Sibi Sankar47b87472018-12-29 00:23:05 +05302157 .proxy_supply = (struct qcom_mss_reg_res[]) {
2158 {
2159 .supply = "pll",
2160 .uA = 100000,
2161 },
2162 {}
2163 },
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302164 .proxy_clk_names = (char*[]){
2165 "xo",
2166 "pnoc",
Sibi Sankar80ec4192018-12-29 00:23:03 +05302167 "qdss",
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302168 NULL
2169 },
2170 .active_clk_names = (char*[]){
2171 "iface",
2172 "bus",
2173 "mem",
Sibi Sankar80ec4192018-12-29 00:23:03 +05302174 "gpll0_mss",
2175 "snoc_axi",
2176 "mnoc_axi",
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302177 NULL
2178 },
2179 .need_mem_protection = true,
Sibi Sankar231f67d2018-05-21 22:57:13 +05302180 .has_alt_reset = false,
Sibi Sankar318130c2020-07-21 16:59:35 +05302181 .has_mba_logs = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05302182 .has_spare_reg = false,
Sibi Sankarc8423792021-09-17 19:25:30 +05302183 .has_qaccept_regs = false,
2184 .has_ext_cntl_regs = false,
2185 .has_vq6 = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302186 .version = MSS_MSM8996,
2187};
2188
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05302189static const struct rproc_hexagon_res msm8916_mss = {
2190 .hexagon_mba_image = "mba.mbn",
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05302191 .proxy_supply = (struct qcom_mss_reg_res[]) {
2192 {
Stephan Gerhold8750cf32020-09-16 12:41:31 +02002193 .supply = "pll",
2194 .uA = 100000,
2195 },
2196 {}
2197 },
2198 .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2199 {
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05302200 .supply = "mx",
2201 .uV = 1050000,
2202 },
2203 {
2204 .supply = "cx",
2205 .uA = 100000,
2206 },
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05302207 {}
2208 },
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05302209 .proxy_clk_names = (char*[]){
2210 "xo",
2211 NULL
2212 },
2213 .active_clk_names = (char*[]){
2214 "iface",
2215 "bus",
2216 "mem",
2217 NULL
2218 },
Stephan Gerhold8750cf32020-09-16 12:41:31 +02002219 .proxy_pd_names = (char*[]){
2220 "mx",
2221 "cx",
2222 NULL
2223 },
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05302224 .need_mem_protection = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05302225 .has_alt_reset = false,
Sibi Sankar318130c2020-07-21 16:59:35 +05302226 .has_mba_logs = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05302227 .has_spare_reg = false,
Sibi Sankarc8423792021-09-17 19:25:30 +05302228 .has_qaccept_regs = false,
2229 .has_ext_cntl_regs = false,
2230 .has_vq6 = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302231 .version = MSS_MSM8916,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05302232};
2233
2234static const struct rproc_hexagon_res msm8974_mss = {
2235 .hexagon_mba_image = "mba.b00",
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05302236 .proxy_supply = (struct qcom_mss_reg_res[]) {
2237 {
Stephan Gerhold8750cf32020-09-16 12:41:31 +02002238 .supply = "pll",
2239 .uA = 100000,
2240 },
2241 {}
2242 },
2243 .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2244 {
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05302245 .supply = "mx",
2246 .uV = 1050000,
2247 },
2248 {
2249 .supply = "cx",
2250 .uA = 100000,
2251 },
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05302252 {}
2253 },
2254 .active_supply = (struct qcom_mss_reg_res[]) {
2255 {
2256 .supply = "mss",
2257 .uV = 1050000,
2258 .uA = 100000,
2259 },
2260 {}
2261 },
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05302262 .proxy_clk_names = (char*[]){
2263 "xo",
2264 NULL
2265 },
2266 .active_clk_names = (char*[]){
2267 "iface",
2268 "bus",
2269 "mem",
2270 NULL
2271 },
Stephan Gerhold8750cf32020-09-16 12:41:31 +02002272 .proxy_pd_names = (char*[]){
2273 "mx",
2274 "cx",
2275 NULL
2276 },
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05302277 .need_mem_protection = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05302278 .has_alt_reset = false,
Sibi Sankar318130c2020-07-21 16:59:35 +05302279 .has_mba_logs = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05302280 .has_spare_reg = false,
Sibi Sankarc8423792021-09-17 19:25:30 +05302281 .has_qaccept_regs = false,
2282 .has_ext_cntl_regs = false,
2283 .has_vq6 = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302284 .version = MSS_MSM8974,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05302285};
2286
Bjorn Andersson051fb702016-06-20 14:28:41 -07002287static const struct of_device_id q6v5_of_match[] = {
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05302288 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
2289 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
2290 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05302291 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07002292 { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
Sibi Sankar6439b522019-12-19 11:15:06 +05302293 { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
Sibi Sankarc8423792021-09-17 19:25:30 +05302294 { .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
Sibi Sankar231f67d2018-05-21 22:57:13 +05302295 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
Bjorn Andersson051fb702016-06-20 14:28:41 -07002296 { },
2297};
Javier Martinez Canillas3227c872016-10-18 18:24:19 -03002298MODULE_DEVICE_TABLE(of, q6v5_of_match);
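
/*
 * Illustrative, non-authoritative node for one of the compatibles above;
 * addresses, offsets and phandles are placeholders:
 *
 *	remoteproc_mpss: remoteproc@4080000 {
 *		compatible = "qcom,sdm845-mss-pil";
 *		reg = <0x04080000 0x408>, <0x04180000 0x48>;
 *		reg-names = "qdsp6", "rmb";
 *		firmware-name = "qcom/sdm845/mba.mbn", "qcom/sdm845/modem.mdt";
 *		memory-region = <&mba_mem>, <&mpss_mem>;
 *		qcom,halt-regs = <&tcsr_mutex_regs 0x23000 0x25000 0x24000>;
 *		...
 *	};
 */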
Bjorn Andersson051fb702016-06-20 14:28:41 -07002299
2300static struct platform_driver q6v5_driver = {
2301 .probe = q6v5_probe,
2302 .remove = q6v5_remove,
2303 .driver = {
Bjorn Anderssonef73c222018-09-24 16:45:26 -07002304 .name = "qcom-q6v5-mss",
Bjorn Andersson051fb702016-06-20 14:28:41 -07002305 .of_match_table = q6v5_of_match,
2306 },
2307};
2308module_platform_driver(q6v5_driver);
2309
Bjorn Anderssonef73c222018-09-24 16:45:26 -07002310MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
Bjorn Andersson051fb702016-06-20 14:28:41 -07002311MODULE_LICENSE("GPL v2");