blob: 1fd270a900f4f6f86f8cf18ee92795a7704a25db [file] [log] [blame]
Thomas Gleixner1802d0b2019-05-27 08:55:21 +02001// SPDX-License-Identifier: GPL-2.0-only
Bjorn Andersson051fb702016-06-20 14:28:41 -07002/*
Bjorn Anderssonef73c222018-09-24 16:45:26 -07003 * Qualcomm self-authenticating modem subsystem remoteproc driver
Bjorn Andersson051fb702016-06-20 14:28:41 -07004 *
5 * Copyright (C) 2016 Linaro Ltd.
6 * Copyright (C) 2014 Sony Mobile Communications AB
7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Bjorn Andersson051fb702016-06-20 14:28:41 -07008 */
9
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/dma-mapping.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/mfd/syscon.h>
16#include <linux/module.h>
17#include <linux/of_address.h>
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +053018#include <linux/of_device.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070019#include <linux/platform_device.h>
Rajendra Nayak4760a892019-01-30 16:39:30 -080020#include <linux/pm_domain.h>
21#include <linux/pm_runtime.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070022#include <linux/regmap.h>
23#include <linux/regulator/consumer.h>
24#include <linux/remoteproc.h>
Alex Elderd7f5f3c2020-03-05 22:28:15 -060025#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
Bjorn Andersson051fb702016-06-20 14:28:41 -070026#include <linux/reset.h>
Bjorn Andersson2aad40d2017-01-27 03:12:57 -080027#include <linux/soc/qcom/mdt_loader.h>
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053028#include <linux/iopoll.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070029
30#include "remoteproc_internal.h"
Bjorn Anderssonbde440e2017-01-27 02:28:32 -080031#include "qcom_common.h"
Bjorn Anderssond4c78d22020-06-22 12:19:40 -070032#include "qcom_pil_info.h"
Bjorn Andersson7d674732018-06-04 13:30:38 -070033#include "qcom_q6v5.h"
Bjorn Andersson051fb702016-06-20 14:28:41 -070034
35#include <linux/qcom_scm.h>
36
Bjorn Andersson051fb702016-06-20 14:28:41 -070037#define MPSS_CRASH_REASON_SMEM 421
38
39/* RMB Status Register Values */
40#define RMB_PBL_SUCCESS 0x1
41
42#define RMB_MBA_XPU_UNLOCKED 0x1
43#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
44#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
45#define RMB_MBA_AUTH_COMPLETE 0x4
46
47/* PBL/MBA interface registers */
48#define RMB_MBA_IMAGE_REG 0x00
49#define RMB_PBL_STATUS_REG 0x04
50#define RMB_MBA_COMMAND_REG 0x08
51#define RMB_MBA_STATUS_REG 0x0C
52#define RMB_PMI_META_DATA_REG 0x10
53#define RMB_PMI_CODE_START_REG 0x14
54#define RMB_PMI_CODE_LENGTH_REG 0x18
Sibi Sankar231f67d2018-05-21 22:57:13 +053055#define RMB_MBA_MSS_STATUS 0x40
56#define RMB_MBA_ALT_RESET 0x44
Bjorn Andersson051fb702016-06-20 14:28:41 -070057
58#define RMB_CMD_META_DATA_READY 0x1
59#define RMB_CMD_LOAD_READY 0x2
60
61/* QDSP6SS Register Offsets */
62#define QDSP6SS_RESET_REG 0x014
63#define QDSP6SS_GFMUX_CTL_REG 0x020
64#define QDSP6SS_PWR_CTL_REG 0x030
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053065#define QDSP6SS_MEM_PWR_CTL 0x0B0
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -070066#define QDSP6V6SS_MEM_PWR_CTL 0x034
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053067#define QDSP6SS_STRAP_ACC 0x110
Bjorn Andersson051fb702016-06-20 14:28:41 -070068
69/* AXI Halt Register Offsets */
70#define AXI_HALTREQ_REG 0x0
71#define AXI_HALTACK_REG 0x4
72#define AXI_IDLE_REG 0x8
Sibi Sankar600c39b2020-01-23 18:42:36 +053073#define AXI_GATING_VALID_OVERRIDE BIT(0)
Bjorn Andersson051fb702016-06-20 14:28:41 -070074
Sibi Sankar01bf3fe2020-01-23 18:42:35 +053075#define HALT_ACK_TIMEOUT_US 100000
Bjorn Andersson051fb702016-06-20 14:28:41 -070076
77/* QDSP6SS_RESET */
78#define Q6SS_STOP_CORE BIT(0)
79#define Q6SS_CORE_ARES BIT(1)
80#define Q6SS_BUS_ARES_ENABLE BIT(2)
81
Sibi Sankar7e0f8682020-01-17 19:21:28 +053082/* QDSP6SS CBCR */
83#define Q6SS_CBCR_CLKEN BIT(0)
84#define Q6SS_CBCR_CLKOFF BIT(31)
85#define Q6SS_CBCR_TIMEOUT_US 200
86
Bjorn Andersson051fb702016-06-20 14:28:41 -070087/* QDSP6SS_GFMUX_CTL */
88#define Q6SS_CLK_ENABLE BIT(1)
89
90/* QDSP6SS_PWR_CTL */
91#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
92#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
93#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
94#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
95#define Q6SS_ETB_SLP_NRET_N BIT(17)
96#define Q6SS_L2DATA_STBY_N BIT(18)
97#define Q6SS_SLP_RET_N BIT(19)
98#define Q6SS_CLAMP_IO BIT(20)
99#define QDSS_BHS_ON BIT(21)
100#define QDSS_LDO_BYP BIT(22)
101
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +0530102/* QDSP6v56 parameters */
103#define QDSP6v56_LDO_BYP BIT(25)
104#define QDSP6v56_BHS_ON BIT(24)
105#define QDSP6v56_CLAMP_WL BIT(21)
106#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +0530107#define QDSP6SS_XO_CBCR 0x0038
108#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
109
Sibi Sankar231f67d2018-05-21 22:57:13 +0530110/* QDSP6v65 parameters */
Sibi Sankar6439b522019-12-19 11:15:06 +0530111#define QDSP6SS_CORE_CBCR 0x20
Sibi Sankar231f67d2018-05-21 22:57:13 +0530112#define QDSP6SS_SLEEP 0x3C
113#define QDSP6SS_BOOT_CORE_START 0x400
114#define QDSP6SS_BOOT_CMD 0x404
Sibi Sankar231f67d2018-05-21 22:57:13 +0530115#define BOOT_FSM_TIMEOUT 10000
116
/* One regulator handle plus the voltage/load votes to apply to it. */
struct reg_info {
	struct regulator *reg;
	int uV;		/* voltage vote in microvolts; <= 0 means no vote */
	int uA;		/* load vote in microamps; <= 0 means no vote */
};
122
123struct qcom_mss_reg_res {
124 const char *supply;
125 int uV;
126 int uA;
127};
128
/*
 * Per-SoC (match data) description of the modem subsystem: default MBA
 * firmware name, the regulator supplies, clocks and power domains to
 * vote on, plus flags selecting the reset/boot sequence variant.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;		/* default MBA firmware name */
	struct qcom_mss_reg_res *proxy_supply;	/* NULL-terminated vote list */
	struct qcom_mss_reg_res *active_supply;	/* NULL-terminated vote list */
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;			/* MSS_* enum, selects boot sequence */
	bool need_mem_protection;	/* assign carveouts to modem via SCM */
	bool has_alt_reset;		/* use PDC-assisted alternate reset */
	bool has_spare_reg;		/* apply AXI gating override workaround */
};
143
/* Driver state for one Q6V5 modem subsystem (MSS) remoteproc instance. */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	/* QDSP6SS and RMB register blocks */
	void __iomem *reg_base;
	void __iomem *rmb_base;

	/* regmaps and offsets for the AXI halt and connectivity registers */
	struct regmap *halt_map;
	struct regmap *conn_map;

	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	/* clock, power-domain and regulator votes with their counts */
	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	/* coredump bookkeeping */
	bool dump_mba_loaded;
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA (modem boot authenticator) carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* MPSS (modem firmware) carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_rproc_ipa_notify ipa_notify_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_spare_reg;
	/* current SCM ownership bitmaps for the two carveouts */
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;
	int version;		/* MSS_* enum value */
};
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530207
/* Silicon versions with distinct Q6 power-up/reset sequences. */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};
216
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530217static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
218 const struct qcom_mss_reg_res *reg_res)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700219{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530220 int rc;
221 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700222
Bjorn Andersson2bb5d902017-01-30 03:20:27 -0800223 if (!reg_res)
224 return 0;
225
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530226 for (i = 0; reg_res[i].supply; i++) {
227 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
228 if (IS_ERR(regs[i].reg)) {
229 rc = PTR_ERR(regs[i].reg);
230 if (rc != -EPROBE_DEFER)
231 dev_err(dev, "Failed to get %s\n regulator",
232 reg_res[i].supply);
233 return rc;
234 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700235
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530236 regs[i].uV = reg_res[i].uV;
237 regs[i].uA = reg_res[i].uA;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700238 }
239
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530240 return i;
241}
242
243static int q6v5_regulator_enable(struct q6v5 *qproc,
244 struct reg_info *regs, int count)
245{
246 int ret;
247 int i;
248
249 for (i = 0; i < count; i++) {
250 if (regs[i].uV > 0) {
251 ret = regulator_set_voltage(regs[i].reg,
252 regs[i].uV, INT_MAX);
253 if (ret) {
254 dev_err(qproc->dev,
255 "Failed to request voltage for %d.\n",
256 i);
257 goto err;
258 }
259 }
260
261 if (regs[i].uA > 0) {
262 ret = regulator_set_load(regs[i].reg,
263 regs[i].uA);
264 if (ret < 0) {
265 dev_err(qproc->dev,
266 "Failed to set regulator mode\n");
267 goto err;
268 }
269 }
270
271 ret = regulator_enable(regs[i].reg);
272 if (ret) {
273 dev_err(qproc->dev, "Regulator enable failed\n");
274 goto err;
275 }
276 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700277
278 return 0;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530279err:
280 for (; i >= 0; i--) {
281 if (regs[i].uV > 0)
282 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
283
284 if (regs[i].uA > 0)
285 regulator_set_load(regs[i].reg, 0);
286
287 regulator_disable(regs[i].reg);
288 }
289
290 return ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700291}
292
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530293static void q6v5_regulator_disable(struct q6v5 *qproc,
294 struct reg_info *regs, int count)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700295{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530296 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700297
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530298 for (i = 0; i < count; i++) {
299 if (regs[i].uV > 0)
300 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700301
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530302 if (regs[i].uA > 0)
303 regulator_set_load(regs[i].reg, 0);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700304
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530305 regulator_disable(regs[i].reg);
306 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700307}
308
/*
 * Prepare and enable the first @count clocks of @clks. On failure the
 * clocks enabled so far are unwound and the error is returned.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc = 0;
	int i = 0;

	while (i < count) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			/* Unwind the clocks already enabled */
			while (i-- > 0)
				clk_disable_unprepare(clks[i]);
			return rc;
		}
		i++;
	}

	return 0;
}
330
/* Disable and unprepare every clock in @clks, in array order. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	struct clk **clk;

	for (clk = clks; clk < clks + count; clk++)
		clk_disable_unprepare(*clk);
}
339
Rajendra Nayak4760a892019-01-30 16:39:30 -0800340static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
341 size_t pd_count)
342{
343 int ret;
344 int i;
345
346 for (i = 0; i < pd_count; i++) {
347 dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
348 ret = pm_runtime_get_sync(pds[i]);
349 if (ret < 0)
350 goto unroll_pd_votes;
351 }
352
353 return 0;
354
355unroll_pd_votes:
356 for (i--; i >= 0; i--) {
357 dev_pm_genpd_set_performance_state(pds[i], 0);
358 pm_runtime_put(pds[i]);
359 }
360
361 return ret;
Alex Elder58396812020-04-03 12:50:05 -0500362}
Rajendra Nayak4760a892019-01-30 16:39:30 -0800363
364static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
365 size_t pd_count)
366{
367 int i;
368
369 for (i = 0; i < pd_count; i++) {
370 dev_pm_genpd_set_performance_state(pds[i], 0);
371 pm_runtime_put(pds[i]);
372 }
373}
374
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530375static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
Bjorn Andersson715d8522020-03-05 01:17:28 +0530376 bool local, bool remote, phys_addr_t addr,
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530377 size_t size)
378{
Bjorn Andersson715d8522020-03-05 01:17:28 +0530379 struct qcom_scm_vmperm next[2];
380 int perms = 0;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530381
382 if (!qproc->need_mem_protection)
383 return 0;
Bjorn Andersson715d8522020-03-05 01:17:28 +0530384
385 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
386 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530387 return 0;
388
Bjorn Andersson715d8522020-03-05 01:17:28 +0530389 if (local) {
390 next[perms].vmid = QCOM_SCM_VMID_HLOS;
391 next[perms].perm = QCOM_SCM_PERM_RWX;
392 perms++;
393 }
394
395 if (remote) {
396 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
397 next[perms].perm = QCOM_SCM_PERM_RW;
398 perms++;
399 }
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530400
Bjorn Andersson9f2a4342017-11-06 22:26:41 -0800401 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
Bjorn Andersson715d8522020-03-05 01:17:28 +0530402 current_perm, next, perms);
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530403}
404
Bjorn Andersson051fb702016-06-20 14:28:41 -0700405static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
406{
407 struct q6v5 *qproc = rproc->priv;
408
409 memcpy(qproc->mba_region, fw->data, fw->size);
410
411 return 0;
412}
413
/*
 * Put the modem subsystem into reset. Three per-silicon strategies:
 * alt-reset parts pulse the MSS restart while the PDC reset is held,
 * spare-reg parts additionally mask the AXI valid signal around the
 * reset (see comment below), and everything else simply asserts the
 * MSS restart line.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Pulse the MSS restart while holding the PDC reset */
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
		 * is withdrawn post MSS assert followed by a MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}
446
/*
 * Release the modem subsystem from reset, mirroring q6v5_reset_assert():
 * alt-reset parts pulse the MSS restart under PDC reset with the
 * RMB_MBA_ALT_RESET flag raised around it (presumably signalling an
 * alternate-reset cycle to the MBA — inferred from the register name),
 * spare-reg parts pulse the restart, others simply deassert it.
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}
465
Bjorn Andersson051fb702016-06-20 14:28:41 -0700466static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
467{
468 unsigned long timeout;
469 s32 val;
470
471 timeout = jiffies + msecs_to_jiffies(ms);
472 for (;;) {
473 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
474 if (val)
475 break;
476
477 if (time_after(jiffies, timeout))
478 return -ETIMEDOUT;
479
480 msleep(1);
481 }
482
483 return val;
484}
485
486static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
487{
488
489 unsigned long timeout;
490 s32 val;
491
492 timeout = jiffies + msecs_to_jiffies(ms);
493 for (;;) {
494 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
495 if (val < 0)
496 break;
497
498 if (!status && val)
499 break;
500 else if (status && val == status)
501 break;
502
503 if (time_after(jiffies, timeout))
504 return -ETIMEDOUT;
505
506 msleep(1);
507 }
508
509 return val;
510}
511
/*
 * Bring the Hexagon core out of reset and wait for its primary boot
 * loader (PBL) to report success via the RMB status register.
 *
 * The power-up sequence depends on the silicon version:
 *  - MSS_SDM845 / MSS_SC7180: a hardware boot FSM sequences the core;
 *    only the relevant clocks are enabled before the FSM is kicked.
 *  - MSS_MSM8996 / MSS_MSM8998: manual power-up — headswitch, LDO
 *    bypass, clamps and per-bank memory power bits.
 *  - all other versions: manual QDSS-based power-up.
 * All paths converge at pbl_wait.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* FSM completion is signalled by bit 0 of MSS_STATUS */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to post the write before the settling delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to post the write before the settling delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
728
729static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
730 struct regmap *halt_map,
731 u32 offset)
732{
Bjorn Andersson051fb702016-06-20 14:28:41 -0700733 unsigned int val;
734 int ret;
735
736 /* Check if we're already idle */
737 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
738 if (!ret && val)
739 return;
740
741 /* Assert halt request */
742 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
743
744 /* Wait for halt */
Sibi Sankar01bf3fe2020-01-23 18:42:35 +0530745 regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
746 val, 1000, HALT_ACK_TIMEOUT_US);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700747
748 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
749 if (ret || !val)
750 dev_err(qproc->dev, "port failed halt\n");
751
752 /* Clear halt request (port will remain halted until reset) */
753 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
754}
755
/*
 * Hand the MPSS firmware metadata to the MBA for authentication.
 *
 * The metadata extracted from @fw is copied into a physically contiguous
 * DMA buffer, ownership of that buffer is assigned to the modem via an
 * SCM call, and the MBA is pointed at it through the RMB registers. The
 * buffer is reclaimed from the modem and freed before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and ask it to authenticate */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}
813
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800814static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
815{
816 if (phdr->p_type != PT_LOAD)
817 return false;
818
819 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
820 return false;
821
822 if (!phdr->p_memsz)
823 return false;
824
825 return true;
826}
827
/*
 * Power up the modem subsystem and boot the modem boot authenticator (MBA).
 *
 * Enables power domains, regulators and clocks in the required order,
 * deasserts the subsystem reset, assigns the MBA region to the Q6 and waits
 * for the MBA to report boot success through the RMB status register.
 *
 * On any failure everything acquired so far is released in reverse order
 * via the goto-unwind chain below; the ordering of these labels mirrors the
 * acquisition order exactly and must not be changed.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the Q6 where the MBA image lives, then bring it out of reset */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/*
	 * A positive RMB status is reported on success; anything other than
	 * the two "XPU unlocked" states is treated as a boot failure.
	 */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	/* Remembered so a coredump can tell whether the MBA must be reloaded */
	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
957
/*
 * Tear down the state established by q6v5_mba_load(): halt the AXI ports,
 * assert reset, drop the active clocks/regulators/power-domain votes and
 * reclaim ownership of the MBA region from the Q6.
 *
 * Proxy resources are only released here if the handover interrupt never
 * fired (qcom_q6v5_unprepare() returning non-zero); otherwise they were
 * (or will be) dropped in the handover path.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		/* Handover never happened; drop the proxy votes ourselves */
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
1006
Sibi Sankard96f2572020-03-05 01:17:29 +05301007static int q6v5_reload_mba(struct rproc *rproc)
1008{
1009 struct q6v5 *qproc = rproc->priv;
1010 const struct firmware *fw;
1011 int ret;
1012
1013 ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1014 if (ret < 0)
1015 return ret;
1016
1017 q6v5_load(rproc, fw);
1018 ret = q6v5_mba_load(qproc);
1019 release_firmware(fw);
1020
1021 return ret;
1022}
1023
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001024static int q6v5_mpss_load(struct q6v5 *qproc)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001025{
1026 const struct elf32_phdr *phdrs;
1027 const struct elf32_phdr *phdr;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001028 const struct firmware *seg_fw;
1029 const struct firmware *fw;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001030 struct elf32_hdr *ehdr;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001031 phys_addr_t mpss_reloc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001032 phys_addr_t boot_addr;
Stefan Agnerd7dc8992018-06-14 15:28:02 -07001033 phys_addr_t min_addr = PHYS_ADDR_MAX;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001034 phys_addr_t max_addr = 0;
Bjorn Andersson715d8522020-03-05 01:17:28 +05301035 u32 code_length;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001036 bool relocate = false;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301037 char *fw_name;
1038 size_t fw_name_len;
Bjorn Andersson01625cc52017-02-15 14:00:41 -08001039 ssize_t offset;
Avaneesh Kumar Dwivedi94c90782017-10-24 21:22:25 +05301040 size_t size = 0;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001041 void *ptr;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001042 int ret;
1043 int i;
1044
Sibi Sankara5a4e022019-01-15 01:20:01 +05301045 fw_name_len = strlen(qproc->hexagon_mdt_image);
1046 if (fw_name_len <= 4)
1047 return -EINVAL;
1048
1049 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1050 if (!fw_name)
1051 return -ENOMEM;
1052
1053 ret = request_firmware(&fw, fw_name, qproc->dev);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001054 if (ret < 0) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301055 dev_err(qproc->dev, "unable to load %s\n", fw_name);
1056 goto out;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001057 }
1058
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001059 /* Initialize the RMB validator */
1060 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1061
1062 ret = q6v5_mpss_init_image(qproc, fw);
1063 if (ret)
1064 goto release_firmware;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001065
1066 ehdr = (struct elf32_hdr *)fw->data;
1067 phdrs = (struct elf32_phdr *)(ehdr + 1);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001068
1069 for (i = 0; i < ehdr->e_phnum; i++) {
Bjorn Andersson051fb702016-06-20 14:28:41 -07001070 phdr = &phdrs[i];
1071
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001072 if (!q6v5_phdr_valid(phdr))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001073 continue;
1074
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001075 if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1076 relocate = true;
1077
1078 if (phdr->p_paddr < min_addr)
1079 min_addr = phdr->p_paddr;
1080
1081 if (phdr->p_paddr + phdr->p_memsz > max_addr)
1082 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1083 }
1084
Bjorn Andersson900fc602020-03-05 01:17:27 +05301085 /**
1086 * In case of a modem subsystem restart on secure devices, the modem
1087 * memory can be reclaimed only after MBA is loaded. For modem cold
1088 * boot this will be a nop
1089 */
Bjorn Andersson715d8522020-03-05 01:17:28 +05301090 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
Bjorn Andersson900fc602020-03-05 01:17:27 +05301091 qproc->mpss_phys, qproc->mpss_size);
1092
Bjorn Andersson715d8522020-03-05 01:17:28 +05301093 /* Share ownership between Linux and MSS, during segment loading */
1094 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1095 qproc->mpss_phys, qproc->mpss_size);
1096 if (ret) {
1097 dev_err(qproc->dev,
1098 "assigning Q6 access to mpss memory failed: %d\n", ret);
1099 ret = -EAGAIN;
1100 goto release_firmware;
1101 }
1102
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001103 mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
Sibi Sankar3bf62eb2018-07-27 20:50:03 +05301104 qproc->mpss_reloc = mpss_reloc;
Avaneesh Kumar Dwivedi94c90782017-10-24 21:22:25 +05301105 /* Load firmware segments */
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001106 for (i = 0; i < ehdr->e_phnum; i++) {
1107 phdr = &phdrs[i];
1108
1109 if (!q6v5_phdr_valid(phdr))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001110 continue;
1111
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001112 offset = phdr->p_paddr - mpss_reloc;
1113 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1114 dev_err(qproc->dev, "segment outside memory range\n");
1115 ret = -EINVAL;
1116 goto release_firmware;
1117 }
1118
Sibi Sankarbe050a32020-04-15 12:46:18 +05301119 ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
1120 if (!ptr) {
1121 dev_err(qproc->dev,
1122 "unable to map memory region: %pa+%zx-%x\n",
1123 &qproc->mpss_phys, offset, phdr->p_memsz);
1124 goto release_firmware;
1125 }
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001126
Bjorn Anderssonf04b9132019-06-21 18:21:46 -07001127 if (phdr->p_filesz && phdr->p_offset < fw->size) {
1128 /* Firmware is large enough to be non-split */
1129 if (phdr->p_offset + phdr->p_filesz > fw->size) {
1130 dev_err(qproc->dev,
1131 "failed to load segment %d from truncated file %s\n",
1132 i, fw_name);
1133 ret = -EINVAL;
Sibi Sankarbe050a32020-04-15 12:46:18 +05301134 iounmap(ptr);
Bjorn Anderssonf04b9132019-06-21 18:21:46 -07001135 goto release_firmware;
1136 }
1137
1138 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1139 } else if (phdr->p_filesz) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301140 /* Replace "xxx.xxx" with "xxx.bxx" */
1141 sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1142 ret = request_firmware(&seg_fw, fw_name, qproc->dev);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001143 if (ret) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301144 dev_err(qproc->dev, "failed to load %s\n", fw_name);
Sibi Sankarbe050a32020-04-15 12:46:18 +05301145 iounmap(ptr);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -08001146 goto release_firmware;
1147 }
1148
1149 memcpy(ptr, seg_fw->data, seg_fw->size);
1150
1151 release_firmware(seg_fw);
1152 }
1153
1154 if (phdr->p_memsz > phdr->p_filesz) {
1155 memset(ptr + phdr->p_filesz, 0,
1156 phdr->p_memsz - phdr->p_filesz);
1157 }
Sibi Sankarbe050a32020-04-15 12:46:18 +05301158 iounmap(ptr);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001159 size += phdr->p_memsz;
Bjorn Andersson715d8522020-03-05 01:17:28 +05301160
1161 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1162 if (!code_length) {
1163 boot_addr = relocate ? qproc->mpss_phys : min_addr;
1164 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1165 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1166 }
1167 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1168
1169 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1170 if (ret < 0) {
1171 dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1172 ret);
1173 goto release_firmware;
1174 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001175 }
1176
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301177 /* Transfer ownership of modem ddr region to q6 */
Bjorn Andersson715d8522020-03-05 01:17:28 +05301178 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301179 qproc->mpss_phys, qproc->mpss_size);
Bjorn Andersson9f2a4342017-11-06 22:26:41 -08001180 if (ret) {
1181 dev_err(qproc->dev,
1182 "assigning Q6 access to mpss memory failed: %d\n", ret);
Christophe JAILLET1a5d5c52017-11-15 07:58:35 +01001183 ret = -EAGAIN;
1184 goto release_firmware;
Bjorn Andersson9f2a4342017-11-06 22:26:41 -08001185 }
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301186
Bjorn Andersson72beb492016-07-12 17:15:45 -07001187 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1188 if (ret == -ETIMEDOUT)
1189 dev_err(qproc->dev, "MPSS authentication timed out\n");
1190 else if (ret < 0)
1191 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1192
Bjorn Anderssond4c78d22020-06-22 12:19:40 -07001193 qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
1194
Bjorn Andersson051fb702016-06-20 14:28:41 -07001195release_firmware:
1196 release_firmware(fw);
Sibi Sankara5a4e022019-01-15 01:20:01 +05301197out:
1198 kfree(fw_name);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001199
1200 return ret < 0 ? ret : 0;
1201}
1202
/*
 * Coredump callback: copy one registered MPSS segment into the dump buffer.
 *
 * segment->priv carries the segment's program-header index (stored by
 * qcom_q6v5_register_dump_segments()); the corresponding bit is tracked in
 * dump_segment_mask so the MBA is reclaimed only after the last segment.
 * If the segment cannot be mapped/read, the destination is filled with
 * 0xff so the dump keeps its expected layout.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	unsigned long mask = BIT((unsigned long)segment->priv);
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);

	if (ptr) {
		memcpy(dest, ptr, segment->size);
		iounmap(ptr);
	} else {
		/* Could not access the segment; pad the dump instead */
		memset(dest, 0xff, segment->size);
	}

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}
1249
/*
 * rproc .start handler: boot the MBA, load and authenticate the MPSS
 * firmware, then wait for the modem to signal it has started.  On success
 * the MBA staging buffer is reclaimed from the Q6 (it is no longer needed)
 * and the coredump bookkeeping is reset.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Modem is running; take the MBA buffer back from the Q6 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	/* Also unwinds everything q6v5_mba_load() set up */
	q6v5_mba_reclaim(qproc);

	return ret;
}
1290
1291static int q6v5_stop(struct rproc *rproc)
1292{
1293 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1294 int ret;
1295
1296 qproc->running = false;
1297
Bjorn Andersson7d674732018-06-04 13:30:38 -07001298 ret = qcom_q6v5_request_stop(&qproc->q6v5);
1299 if (ret == -ETIMEDOUT)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001300 dev_err(qproc->dev, "timed out on wait\n");
1301
Sibi Sankar03045302018-10-17 19:25:25 +05301302 q6v5_mba_reclaim(qproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001303
1304 return 0;
1305}
1306
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301307static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1308 const struct firmware *mba_fw)
1309{
1310 const struct firmware *fw;
1311 const struct elf32_phdr *phdrs;
1312 const struct elf32_phdr *phdr;
1313 const struct elf32_hdr *ehdr;
1314 struct q6v5 *qproc = rproc->priv;
1315 unsigned long i;
1316 int ret;
1317
Sibi Sankara5a4e022019-01-15 01:20:01 +05301318 ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301319 if (ret < 0) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301320 dev_err(qproc->dev, "unable to load %s\n",
1321 qproc->hexagon_mdt_image);
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301322 return ret;
1323 }
1324
Clement Leger3898fc92020-04-10 12:24:33 +02001325 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1326
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301327 ehdr = (struct elf32_hdr *)fw->data;
1328 phdrs = (struct elf32_phdr *)(ehdr + 1);
1329 qproc->dump_complete_mask = 0;
1330
1331 for (i = 0; i < ehdr->e_phnum; i++) {
1332 phdr = &phdrs[i];
1333
1334 if (!q6v5_phdr_valid(phdr))
1335 continue;
1336
1337 ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1338 phdr->p_memsz,
1339 qcom_q6v5_dump_segment,
1340 (void *)i);
1341 if (ret)
1342 break;
1343
1344 qproc->dump_complete_mask |= BIT(i);
1345 }
1346
1347 release_firmware(fw);
1348 return ret;
1349}
1350
/* Callbacks exposed to the remoteproc core for the modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};
1357
Bjorn Andersson7d674732018-06-04 13:30:38 -07001358static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001359{
Bjorn Andersson7d674732018-06-04 13:30:38 -07001360 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
Sibi Sankar663e9842018-05-21 22:57:09 +05301361
1362 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1363 qproc->proxy_clk_count);
1364 q6v5_regulator_disable(qproc, qproc->proxy_regs,
1365 qproc->proxy_reg_count);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001366 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001367}
1368
/*
 * Map the QDSP6 and RMB register windows and resolve the syscon references
 * used to halt the AXI ports (plus, on platforms that have it, the spare
 * "conn box" register).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* "qcom,halt-regs" = <&syscon q6_offset modem_offset nc_offset> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_spare_reg) {
		/* "qcom,spare-regs" = <&syscon conn_box_offset> */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}
1420
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301421static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1422 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001423{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301424 int i;
1425
1426 if (!clk_names)
1427 return 0;
1428
1429 for (i = 0; clk_names[i]; i++) {
1430 clks[i] = devm_clk_get(dev, clk_names[i]);
1431 if (IS_ERR(clks[i])) {
1432 int rc = PTR_ERR(clks[i]);
1433
1434 if (rc != -EPROBE_DEFER)
1435 dev_err(dev, "Failed to get %s clock\n",
1436 clk_names[i]);
1437 return rc;
1438 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001439 }
1440
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301441 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001442}
1443
Rajendra Nayak4760a892019-01-30 16:39:30 -08001444static int q6v5_pds_attach(struct device *dev, struct device **devs,
1445 char **pd_names)
1446{
1447 size_t num_pds = 0;
1448 int ret;
1449 int i;
1450
1451 if (!pd_names)
1452 return 0;
1453
1454 while (pd_names[num_pds])
1455 num_pds++;
1456
1457 for (i = 0; i < num_pds; i++) {
1458 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
Sibi Sankarf2583fd2019-08-21 23:35:48 +05301459 if (IS_ERR_OR_NULL(devs[i])) {
1460 ret = PTR_ERR(devs[i]) ? : -ENODATA;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001461 goto unroll_attach;
1462 }
1463 }
1464
1465 return num_pds;
1466
1467unroll_attach:
1468 for (i--; i >= 0; i--)
1469 dev_pm_domain_detach(devs[i], false);
1470
1471 return ret;
Alex Elder58396812020-04-03 12:50:05 -05001472}
Rajendra Nayak4760a892019-01-30 16:39:30 -08001473
1474static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1475 size_t pd_count)
1476{
1477 int i;
1478
1479 for (i = 0; i < pd_count; i++)
1480 dev_pm_domain_detach(pds[i], false);
1481}
1482
Bjorn Andersson051fb702016-06-20 14:28:41 -07001483static int q6v5_init_reset(struct q6v5 *qproc)
1484{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001485 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
Sibi Sankar9e483ef2018-08-30 00:42:14 +05301486 "mss_restart");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001487 if (IS_ERR(qproc->mss_restart)) {
1488 dev_err(qproc->dev, "failed to acquire mss restart\n");
1489 return PTR_ERR(qproc->mss_restart);
1490 }
1491
Sibi Sankara9fdc792020-04-15 20:21:10 +05301492 if (qproc->has_alt_reset || qproc->has_spare_reg) {
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301493 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1494 "pdc_reset");
1495 if (IS_ERR(qproc->pdc_reset)) {
1496 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1497 return PTR_ERR(qproc->pdc_reset);
1498 }
1499 }
1500
Bjorn Andersson051fb702016-06-20 14:28:41 -07001501 return 0;
1502}
1503
Bjorn Andersson051fb702016-06-20 14:28:41 -07001504static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1505{
1506 struct device_node *child;
1507 struct device_node *node;
1508 struct resource r;
1509 int ret;
1510
Sibi Sankar6663ce62020-04-21 20:02:25 +05301511 /*
1512 * In the absence of mba/mpss sub-child, extract the mba and mpss
1513 * reserved memory regions from device's memory-region property.
1514 */
Bjorn Andersson051fb702016-06-20 14:28:41 -07001515 child = of_get_child_by_name(qproc->dev->of_node, "mba");
Sibi Sankar6663ce62020-04-21 20:02:25 +05301516 if (!child)
1517 node = of_parse_phandle(qproc->dev->of_node,
1518 "memory-region", 0);
1519 else
1520 node = of_parse_phandle(child, "memory-region", 0);
1521
Bjorn Andersson051fb702016-06-20 14:28:41 -07001522 ret = of_address_to_resource(node, 0, &r);
1523 if (ret) {
1524 dev_err(qproc->dev, "unable to resolve mba region\n");
1525 return ret;
1526 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001527 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001528
1529 qproc->mba_phys = r.start;
1530 qproc->mba_size = resource_size(&r);
1531 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1532 if (!qproc->mba_region) {
1533 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1534 &r.start, qproc->mba_size);
1535 return -EBUSY;
1536 }
1537
Sibi Sankar6663ce62020-04-21 20:02:25 +05301538 if (!child) {
1539 node = of_parse_phandle(qproc->dev->of_node,
1540 "memory-region", 1);
1541 } else {
1542 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1543 node = of_parse_phandle(child, "memory-region", 0);
1544 }
1545
Bjorn Andersson051fb702016-06-20 14:28:41 -07001546 ret = of_address_to_resource(node, 0, &r);
1547 if (ret) {
1548 dev_err(qproc->dev, "unable to resolve mpss region\n");
1549 return ret;
1550 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001551 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001552
1553 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1554 qproc->mpss_size = resource_size(&r);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001555
1556 return 0;
1557}
1558
Alex Elderd7f5f3c2020-03-05 22:28:15 -06001559#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
1560
1561/* Register IPA notification function */
1562int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
1563 void *data)
1564{
1565 struct qcom_rproc_ipa_notify *ipa_notify;
1566 struct q6v5 *qproc = rproc->priv;
1567
1568 if (!notify)
1569 return -EINVAL;
1570
1571 ipa_notify = &qproc->ipa_notify_subdev;
1572 if (ipa_notify->notify)
1573 return -EBUSY;
1574
1575 ipa_notify->notify = notify;
1576 ipa_notify->data = data;
1577
1578 return 0;
1579}
1580EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
1581
1582/* Deregister IPA notification function */
1583void qcom_deregister_ipa_notify(struct rproc *rproc)
1584{
1585 struct q6v5 *qproc = rproc->priv;
1586
1587 qproc->ipa_notify_subdev.notify = NULL;
1588}
1589EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
#endif /* IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
1591
/*
 * q6v5_probe() - bind the MSS remoteproc driver to a platform device
 * @pdev: platform device matched against q6v5_of_match
 *
 * Allocates the rproc, resolves the MBA/modem firmware names from DT,
 * maps registers and the carveout region, then acquires clocks,
 * regulators, power domains and resets before registering the rproc
 * core and its subdevices.  The acquisition order is mirrored exactly
 * by the unwind labels at the bottom; keep both in sync when editing.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	const char *mba_image;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* TrustZone-validated boot needs SCM; defer until it is available */
	if (desc->need_mem_protection && !qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* "firmware-name" index 0 optionally overrides the default MBA image */
	mba_image = desc->hexagon_mba_image;
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    0, &mba_image);
	if (ret < 0 && ret != -EINVAL)	/* -EINVAL: property absent, keep default */
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	/* Modem is only booted on demand, never at rproc_add() time */
	rproc->auto_boot = false;
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	/* "firmware-name" index 1 optionally overrides the modem metadata image */
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL)
		goto free_rproc;

	platform_set_drvdata(pdev, qproc);

	qproc->has_spare_reg = desc->has_spare_reg;
	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	/*
	 * Clock groups: "proxy" clocks are only held during boot handover,
	 * "reset" clocks around the reset sequence, "active" clocks while
	 * the modem is running.  Each init returns the count on success.
	 */
	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	}
	qproc->proxy_pd_count = ret;

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;

	/* Common Q6V5 state machine; reports crashes via SMEM item 421 */
	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both carveouts start out owned by HLOS until assigned at boot */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	return 0;

remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}
1746
/*
 * q6v5_remove() - unbind the driver, releasing resources in reverse
 * probe order: unregister the rproc, remove subdevices, detach power
 * domains, then drop the final rproc reference.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	rproc_del(rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}
1767
/* Per-SoC resources for the SC7180 modem subsystem (TrustZone PIL boot) */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};
1800
/* Per-SoC resources for the SDM845 modem subsystem (uses the alt reset) */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};
1835
/* Per-SoC resources for the MSM8998 modem subsystem */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};
1862
/* Per-SoC resources for the MSM8996 modem subsystem */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};
1892
/* Per-SoC resources for the MSM8916 modem subsystem (no TZ mem protection) */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};
1925
/*
 * Per-SoC resources for the MSM8974 modem subsystem.  Note the split MBA
 * firmware naming ("mba.b00") and the extra "mss" active-state supply.
 */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};
1966
/*
 * DT match table; "qcom,q6v5-pil" is the legacy compatible and maps to the
 * MSM8916 resource set.
 */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001978
/* Platform driver registration and module metadata */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");