// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm self-authenticating modem subsystem remoteproc driver
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"

#include <linux/qcom_scm.h>

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6V6SS_MEM_PWR_CTL		0x034
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define BOOT_FSM_TIMEOUT		10000

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_spare_reg;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	struct regmap *conn_map;

	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	bool dump_mba_loaded;
	size_t current_dump_size;
	size_t total_dump_size;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_rproc_ipa_notify ipa_notify_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_spare_reg;
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;
	int version;
};

enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};

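/*
 * Look up the regulators described in reg_res and record their voltage and
 * load requirements in regs. Returns the number of regulators found, or a
 * negative errno if a supply could not be acquired.
 */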
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
					regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
						i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
			   size_t pd_count)
{
	int ret;
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0)
			goto unroll_pd_votes;
	}

	return 0;

unroll_pd_votes:
	for (i--; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
}

static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
			     size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}
}

static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool local, bool remote, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next[2];
	int perms = 0;

	if (!qproc->need_mem_protection)
		return 0;

	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
		return 0;

	if (local) {
		next[perms].vmid = QCOM_SCM_VMID_HLOS;
		next[perms].perm = QCOM_SCM_PERM_RWX;
		perms++;
	}

	if (remote) {
		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[perms].perm = QCOM_SCM_PERM_RW;
		perms++;
	}

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, next, perms);
}

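/* Copy the MBA firmware image into the reserved MBA memory region */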
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

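/*
 * Place the modem subsystem in reset, using the PDC-assisted alternate reset
 * or the spare-register AXI gating workaround where the platform requires it.
 */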
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is reset while the Q6 modem is partly
		 * operational, the AXI valid signal can glitch, leading to
		 * spurious transactions and Q6 hangs. Work around this by
		 * asserting the AXI_GATING_VALID_OVERRIDE bit before
		 * triggering the Q6 MSS reset; the override is withdrawn
		 * after the MSS reset is asserted, followed by an MSS reset
		 * deassert, while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}

static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

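/* Poll the RMB PBL status register until it reports a status or ms expires */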
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

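/*
 * Poll the RMB MBA status register until it reports the requested status (or
 * any non-zero status when status is 0), an error, or the timeout expires.
 */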
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

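/*
 * Bring the Hexagon core out of reset. The power-up sequence differs per
 * QDSP6 version: SDM845 and SC7180 use the boot FSM, MSM8996/MSM8998 walk the
 * QDSP6v56 power control registers, and older parts use the legacy sequence.
 * In all cases the function finishes by waiting for the PBL status.
 */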
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS requires the XO CBCR to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

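/*
 * Halt the AXI port behind the given halt register block: request a halt,
 * wait for the acknowledgment and idle indication, then clear the request
 * (the port stays halted until the next reset).
 */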
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

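/*
 * Feed the MPSS metadata (mdt headers) to the MBA for authentication: copy it
 * into a DMA buffer, grant the modem access, signal the RMB and wait for the
 * authentication result, then reclaim the buffer.
 */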
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed, system may become unstable\n");
806
Christophe JAILLET1a5d5c52017-11-15 07:58:35 +0100807free_dma_attrs:
Bjorn Anderssonf04b9132019-06-21 18:21:46 -0700808 dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
809 kfree(metadata);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700810
811 return ret < 0 ? ret : 0;
812}
813
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800814static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
815{
816 if (phdr->p_type != PT_LOAD)
817 return false;
818
819 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
820 return false;
821
822 if (!phdr->p_memsz)
823 return false;
824
825 return true;
826}
827
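/*
 * Power up the subsystem and boot the MBA: enable power domains, supplies,
 * clocks and resets, hand the MBA region to the modem, run the reset sequence
 * and wait for the MBA to report that it is ready.
 */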
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

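/*
 * Undo q6v5_mba_load(): halt the AXI ports, assert reset, drop the clocks,
 * supplies and power domains, and return the MBA region to Linux.
 */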
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/*
	 * In case of a failure or coredump scenario where reclaiming MBA
	 * memory could not happen, reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

static int q6v5_reload_mba(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
	if (ret < 0)
		return ret;

	q6v5_load(rproc, fw);
	ret = q6v5_mba_load(qproc);
	release_firmware(fw);

	return ret;
}

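/*
 * Load and authenticate the MPSS (modem) firmware: validate the metadata with
 * the MBA, copy each ELF segment into the modem region while updating the RMB
 * code length for incremental authentication, then hand the whole region over
 * to the modem and wait for authentication to complete.
 */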
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	u32 code_length;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/*
	 * In case of a modem subsystem restart on secure devices, the modem
	 * memory can be reclaimed only after the MBA is loaded. For a modem
	 * cold boot this will be a no-op.
	 */
	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
				qproc->mpss_phys, qproc->mpss_size);

	/* Share ownership between Linux and MSS, during segment loading */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
		if (!ptr) {
			dev_err(qproc->dev,
				"unable to map memory region: %pa+%zx-%x\n",
				&qproc->mpss_phys, offset, phdr->p_memsz);
			goto release_firmware;
		}

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				iounmap(ptr);
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				iounmap(ptr);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		iounmap(ptr);
		size += phdr->p_memsz;

		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!code_length) {
			boot_addr = relocate ? qproc->mpss_phys : min_addr;
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (ret < 0) {
			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
				ret);
			goto release_firmware;
		}
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	return ret < 0 ? ret : 0;
}

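/*
 * Coredump helper: copy one registered segment out of the modem region,
 * reloading the MBA and reclaiming memory ownership as needed, and release
 * everything again once the final segment has been dumped.
 */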
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);

	if (ptr) {
		memcpy(dest, ptr, segment->size);
		iounmap(ptr);
	} else {
		memset(dest, 0xff, segment->size);
	}

	qproc->current_dump_size += segment->size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);

	return ret;
}

static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

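/*
 * Parse the modem mdt image and register each loadable segment as a custom
 * coredump segment, accumulating the total dump size.
 */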
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->total_dump_size = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							NULL);
		if (ret)
			break;

		qproc->total_dump_size += phdr->p_memsz;
	}

	release_firmware(fw);
	return ret;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

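/*
 * Called once the modem signals the MSA handover; the proxy votes on clocks,
 * regulators and power domains are no longer needed and are dropped here.
 */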
Bjorn Andersson7d674732018-06-04 13:30:38 -07001357static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001358{
Bjorn Andersson7d674732018-06-04 13:30:38 -07001359 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
Sibi Sankar663e9842018-05-21 22:57:09 +05301360
1361 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1362 qproc->proxy_clk_count);
1363 q6v5_regulator_disable(qproc, qproc->proxy_regs,
1364 qproc->proxy_reg_count);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001365 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001366}
1367
1368static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1369{
1370 struct of_phandle_args args;
1371 struct resource *res;
1372 int ret;
1373
1374 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1375 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001376 if (IS_ERR(qproc->reg_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001377 return PTR_ERR(qproc->reg_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001378
1379 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1380 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001381 if (IS_ERR(qproc->rmb_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001382 return PTR_ERR(qproc->rmb_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001383
1384 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1385 "qcom,halt-regs", 3, 0, &args);
1386 if (ret < 0) {
1387 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1388 return -EINVAL;
1389 }
1390
1391 qproc->halt_map = syscon_node_to_regmap(args.np);
1392 of_node_put(args.np);
1393 if (IS_ERR(qproc->halt_map))
1394 return PTR_ERR(qproc->halt_map);
1395
1396 qproc->halt_q6 = args.args[0];
1397 qproc->halt_modem = args.args[1];
1398 qproc->halt_nc = args.args[2];
1399
Sibi Sankara9fdc792020-04-15 20:21:10 +05301400 if (qproc->has_spare_reg) {
Sibi Sankar6439b522019-12-19 11:15:06 +05301401 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301402 "qcom,spare-regs",
Sibi Sankar6439b522019-12-19 11:15:06 +05301403 1, 0, &args);
1404 if (ret < 0) {
Sibi Sankara9fdc792020-04-15 20:21:10 +05301405			dev_err(&pdev->dev, "failed to parse qcom,spare-regs\n");
Sibi Sankar6439b522019-12-19 11:15:06 +05301406 return -EINVAL;
1407 }
1408
1409 qproc->conn_map = syscon_node_to_regmap(args.np);
1410 of_node_put(args.np);
1411 if (IS_ERR(qproc->conn_map))
1412 return PTR_ERR(qproc->conn_map);
1413
1414 qproc->conn_box = args.args[0];
1415 }
1416
Bjorn Andersson051fb702016-06-20 14:28:41 -07001417 return 0;
1418}
1419
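/*
 * Look up the NULL-terminated list of clock names; returns the number of
 * clocks obtained or a negative errno.
 */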
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301420static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1421 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001422{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301423 int i;
1424
1425 if (!clk_names)
1426 return 0;
1427
1428 for (i = 0; clk_names[i]; i++) {
1429 clks[i] = devm_clk_get(dev, clk_names[i]);
1430 if (IS_ERR(clks[i])) {
1431 int rc = PTR_ERR(clks[i]);
1432
1433 if (rc != -EPROBE_DEFER)
1434 dev_err(dev, "Failed to get %s clock\n",
1435 clk_names[i]);
1436 return rc;
1437 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001438 }
1439
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301440 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001441}
1442
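/*
 * Attach the NULL-terminated list of power domains by name; returns the
 * number attached or a negative errno, detaching any partially attached set
 * on failure.
 */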
Rajendra Nayak4760a892019-01-30 16:39:30 -08001443static int q6v5_pds_attach(struct device *dev, struct device **devs,
1444 char **pd_names)
1445{
1446 size_t num_pds = 0;
1447 int ret;
1448 int i;
1449
1450 if (!pd_names)
1451 return 0;
1452
1453 while (pd_names[num_pds])
1454 num_pds++;
1455
1456 for (i = 0; i < num_pds; i++) {
1457 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
Sibi Sankarf2583fd2019-08-21 23:35:48 +05301458 if (IS_ERR_OR_NULL(devs[i])) {
1459 ret = PTR_ERR(devs[i]) ? : -ENODATA;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001460 goto unroll_attach;
1461 }
1462 }
1463
1464 return num_pds;
1465
1466unroll_attach:
1467 for (i--; i >= 0; i--)
1468 dev_pm_domain_detach(devs[i], false);
1469
1470 return ret;
Alex Elder58396812020-04-03 12:50:05 -05001471}
Rajendra Nayak4760a892019-01-30 16:39:30 -08001472
1473static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1474 size_t pd_count)
1475{
1476 int i;
1477
1478 for (i = 0; i < pd_count; i++)
1479 dev_pm_domain_detach(pds[i], false);
1480}
1481
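/*
 * Acquire the mss_restart reset control and, on platforms with an alternate
 * or spare reset, the pdc_reset control as well.
 */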
Bjorn Andersson051fb702016-06-20 14:28:41 -07001482static int q6v5_init_reset(struct q6v5 *qproc)
1483{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001484 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
Sibi Sankar9e483ef2018-08-30 00:42:14 +05301485 "mss_restart");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001486 if (IS_ERR(qproc->mss_restart)) {
1487 dev_err(qproc->dev, "failed to acquire mss restart\n");
1488 return PTR_ERR(qproc->mss_restart);
1489 }
1490
Sibi Sankara9fdc792020-04-15 20:21:10 +05301491 if (qproc->has_alt_reset || qproc->has_spare_reg) {
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301492 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1493 "pdc_reset");
1494 if (IS_ERR(qproc->pdc_reset)) {
1495 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1496 return PTR_ERR(qproc->pdc_reset);
1497 }
1498 }
1499
Bjorn Andersson051fb702016-06-20 14:28:41 -07001500 return 0;
1501}
1502
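/*
 * Resolve the mba and mpss reserved-memory regions (from mba/mpss sub-nodes
 * or, failing that, the device's memory-region property) and map the mba
 * region.
 */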
Bjorn Andersson051fb702016-06-20 14:28:41 -07001503static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1504{
1505 struct device_node *child;
1506 struct device_node *node;
1507 struct resource r;
1508 int ret;
1509
Sibi Sankar6663ce62020-04-21 20:02:25 +05301510 /*
1511	 * In the absence of mba/mpss sub-child nodes, extract the mba and mpss
1512	 * reserved memory regions from the device's memory-region property.
1513 */
Bjorn Andersson051fb702016-06-20 14:28:41 -07001514 child = of_get_child_by_name(qproc->dev->of_node, "mba");
Sibi Sankar6663ce62020-04-21 20:02:25 +05301515 if (!child)
1516 node = of_parse_phandle(qproc->dev->of_node,
1517 "memory-region", 0);
1518 else
1519 node = of_parse_phandle(child, "memory-region", 0);
1520
Bjorn Andersson051fb702016-06-20 14:28:41 -07001521 ret = of_address_to_resource(node, 0, &r);
1522 if (ret) {
1523 dev_err(qproc->dev, "unable to resolve mba region\n");
1524 return ret;
1525 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001526 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001527
1528 qproc->mba_phys = r.start;
1529 qproc->mba_size = resource_size(&r);
1530 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1531 if (!qproc->mba_region) {
1532 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1533 &r.start, qproc->mba_size);
1534 return -EBUSY;
1535 }
1536
Sibi Sankar6663ce62020-04-21 20:02:25 +05301537 if (!child) {
1538 node = of_parse_phandle(qproc->dev->of_node,
1539 "memory-region", 1);
1540 } else {
1541 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1542 node = of_parse_phandle(child, "memory-region", 0);
1543 }
1544
Bjorn Andersson051fb702016-06-20 14:28:41 -07001545 ret = of_address_to_resource(node, 0, &r);
1546 if (ret) {
1547 dev_err(qproc->dev, "unable to resolve mpss region\n");
1548 return ret;
1549 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001550 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001551
1552 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1553 qproc->mpss_size = resource_size(&r);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001554
1555 return 0;
1556}
1557
Alex Elderd7f5f3c2020-03-05 22:28:15 -06001558#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
1559
1560/* Register IPA notification function */
1561int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
1562 void *data)
1563{
1564 struct qcom_rproc_ipa_notify *ipa_notify;
1565 struct q6v5 *qproc = rproc->priv;
1566
1567 if (!notify)
1568 return -EINVAL;
1569
1570 ipa_notify = &qproc->ipa_notify_subdev;
1571 if (ipa_notify->notify)
1572 return -EBUSY;
1573
1574 ipa_notify->notify = notify;
1575 ipa_notify->data = data;
1576
1577 return 0;
1578}
1579EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
1580
1581/* Deregister IPA notification function */
1582void qcom_deregister_ipa_notify(struct rproc *rproc)
1583{
1584 struct q6v5 *qproc = rproc->priv;
1585
1586 qproc->ipa_notify_subdev.notify = NULL;
1587}
1588EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
1589#endif /* IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
1590
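/*
 * Probe: pick the per-SoC resource description, map registers and resolve the
 * carveouts, acquire clocks, regulators, power domains and resets, register
 * the glink, smd, ssr, ipa-notify and sysmon subdevices, then add the rproc.
 */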
Bjorn Andersson051fb702016-06-20 14:28:41 -07001591static int q6v5_probe(struct platform_device *pdev)
1592{
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301593 const struct rproc_hexagon_res *desc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001594 struct q6v5 *qproc;
1595 struct rproc *rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301596 const char *mba_image;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001597 int ret;
1598
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301599 desc = of_device_get_match_data(&pdev->dev);
1600 if (!desc)
1601 return -EINVAL;
1602
Brian Norrisbbcda302018-10-08 19:08:05 -07001603 if (desc->need_mem_protection && !qcom_scm_is_available())
1604 return -EPROBE_DEFER;
1605
Sibi Sankara5a4e022019-01-15 01:20:01 +05301606 mba_image = desc->hexagon_mba_image;
1607 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1608 0, &mba_image);
1609 if (ret < 0 && ret != -EINVAL)
1610 return ret;
1611
Bjorn Andersson051fb702016-06-20 14:28:41 -07001612 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
Sibi Sankara5a4e022019-01-15 01:20:01 +05301613 mba_image, sizeof(*qproc));
Bjorn Andersson051fb702016-06-20 14:28:41 -07001614 if (!rproc) {
1615 dev_err(&pdev->dev, "failed to allocate rproc\n");
1616 return -ENOMEM;
1617 }
1618
Ramon Fried41071022018-05-24 22:21:41 +03001619 rproc->auto_boot = false;
Clement Leger3898fc92020-04-10 12:24:33 +02001620 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
Ramon Fried41071022018-05-24 22:21:41 +03001621
Bjorn Andersson051fb702016-06-20 14:28:41 -07001622	qproc = rproc->priv;
1623 qproc->dev = &pdev->dev;
1624 qproc->rproc = rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301625 qproc->hexagon_mdt_image = "modem.mdt";
1626 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1627 1, &qproc->hexagon_mdt_image);
1628 if (ret < 0 && ret != -EINVAL)
Alex Elder13c060b2020-04-03 12:50:04 -05001629 goto free_rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301630
Bjorn Andersson051fb702016-06-20 14:28:41 -07001631 platform_set_drvdata(pdev, qproc);
1632
Sibi Sankara9fdc792020-04-15 20:21:10 +05301633 qproc->has_spare_reg = desc->has_spare_reg;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001634 ret = q6v5_init_mem(qproc, pdev);
1635 if (ret)
1636 goto free_rproc;
1637
1638 ret = q6v5_alloc_memory_region(qproc);
1639 if (ret)
1640 goto free_rproc;
1641
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301642 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1643 desc->proxy_clk_names);
1644 if (ret < 0) {
1645 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001646 goto free_rproc;
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301647 }
1648 qproc->proxy_clk_count = ret;
1649
Sibi Sankar231f67d2018-05-21 22:57:13 +05301650 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1651 desc->reset_clk_names);
1652 if (ret < 0) {
1653 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1654 goto free_rproc;
1655 }
1656 qproc->reset_clk_count = ret;
1657
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301658 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1659 desc->active_clk_names);
1660 if (ret < 0) {
1661 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1662 goto free_rproc;
1663 }
1664 qproc->active_clk_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001665
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301666 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1667 desc->proxy_supply);
1668 if (ret < 0) {
1669 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001670 goto free_rproc;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301671 }
1672 qproc->proxy_reg_count = ret;
1673
1674 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1675 desc->active_supply);
1676 if (ret < 0) {
1677 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1678 goto free_rproc;
1679 }
1680 qproc->active_reg_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001681
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001682 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1683 desc->active_pd_names);
1684 if (ret < 0) {
1685 dev_err(&pdev->dev, "Failed to attach active power domains\n");
1686 goto free_rproc;
1687 }
1688 qproc->active_pd_count = ret;
1689
Rajendra Nayak4760a892019-01-30 16:39:30 -08001690 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1691 desc->proxy_pd_names);
1692 if (ret < 0) {
1693		dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001694 goto detach_active_pds;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001695 }
1696 qproc->proxy_pd_count = ret;
1697
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301698 qproc->has_alt_reset = desc->has_alt_reset;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001699 ret = q6v5_init_reset(qproc);
1700 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001701 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001702
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301703 qproc->version = desc->version;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301704 qproc->need_mem_protection = desc->need_mem_protection;
Bjorn Andersson7d674732018-06-04 13:30:38 -07001705
1706 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1707 qcom_msa_handover);
1708 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001709 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001710
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301711 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1712 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
Bjorn Anderssoncd9fc8f2020-04-22 17:37:33 -07001713 qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
Bjorn Andersson4b489212017-01-29 14:05:50 -08001714 qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
Bjorn Andersson1e140df2017-07-24 22:56:43 -07001715 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
Alex Elderd7f5f3c2020-03-05 22:28:15 -06001716 qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07001717 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
Sibi Sankar027045a2019-01-08 15:53:43 +05301718 if (IS_ERR(qproc->sysmon)) {
1719 ret = PTR_ERR(qproc->sysmon);
Alex Elder58396812020-04-03 12:50:05 -05001720 goto remove_subdevs;
Sibi Sankar027045a2019-01-08 15:53:43 +05301721 }
Bjorn Andersson4b489212017-01-29 14:05:50 -08001722
Bjorn Andersson051fb702016-06-20 14:28:41 -07001723 ret = rproc_add(rproc);
1724 if (ret)
Alex Elder58396812020-04-03 12:50:05 -05001725 goto remove_sysmon_subdev;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001726
1727 return 0;
1728
Alex Elder58396812020-04-03 12:50:05 -05001729remove_sysmon_subdev:
1730 qcom_remove_sysmon_subdev(qproc->sysmon);
1731remove_subdevs:
Alex Elderd7f5f3c2020-03-05 22:28:15 -06001732 qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
Alex Elder58396812020-04-03 12:50:05 -05001733 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
1734 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
1735 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
1736detach_proxy_pds:
Rajendra Nayak4760a892019-01-30 16:39:30 -08001737 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001738detach_active_pds:
1739 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001740free_rproc:
Bjorn Andersson433c0e02016-10-02 17:46:38 -07001741 rproc_free(rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001742
1743 return ret;
1744}
1745
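/* Tear down in reverse order of probe: delete the rproc, remove the subdevices, detach the power domains and free the rproc */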
1746static int q6v5_remove(struct platform_device *pdev)
1747{
1748 struct q6v5 *qproc = platform_get_drvdata(pdev);
Alex Elder58396812020-04-03 12:50:05 -05001749 struct rproc *rproc = qproc->rproc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001750
Alex Elder58396812020-04-03 12:50:05 -05001751 rproc_del(rproc);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001752
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07001753 qcom_remove_sysmon_subdev(qproc->sysmon);
Alex Elder58396812020-04-03 12:50:05 -05001754 qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
1755 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
1756 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
1757 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001758
1759 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Alex Elder58396812020-04-03 12:50:05 -05001760 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001761
Alex Elder58396812020-04-03 12:50:05 -05001762 rproc_free(rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001763
1764 return 0;
1765}
1766
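/*
 * Per-SoC resource descriptions: firmware names, clock/regulator/power-domain
 * lists and reset topology flags.
 */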
Sibi Sankar6439b522019-12-19 11:15:06 +05301767static const struct rproc_hexagon_res sc7180_mss = {
1768 .hexagon_mba_image = "mba.mbn",
1769 .proxy_clk_names = (char*[]){
1770 "xo",
1771 NULL
1772 },
1773 .reset_clk_names = (char*[]){
1774 "iface",
1775 "bus",
1776 "snoc_axi",
1777 NULL
1778 },
1779 .active_clk_names = (char*[]){
1780 "mnoc_axi",
1781 "nav",
Sibi Sankar6439b522019-12-19 11:15:06 +05301782 NULL
1783 },
1784 .active_pd_names = (char*[]){
1785 "load_state",
1786 NULL
1787 },
1788 .proxy_pd_names = (char*[]){
1789 "cx",
1790 "mx",
1791 "mss",
1792 NULL
1793 },
1794 .need_mem_protection = true,
1795 .has_alt_reset = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301796 .has_spare_reg = true,
Sibi Sankar6439b522019-12-19 11:15:06 +05301797 .version = MSS_SC7180,
1798};
1799
Sibi Sankar231f67d2018-05-21 22:57:13 +05301800static const struct rproc_hexagon_res sdm845_mss = {
1801 .hexagon_mba_image = "mba.mbn",
1802 .proxy_clk_names = (char*[]){
1803 "xo",
Sibi Sankar231f67d2018-05-21 22:57:13 +05301804 "prng",
1805 NULL
1806 },
1807 .reset_clk_names = (char*[]){
1808 "iface",
1809 "snoc_axi",
1810 NULL
1811 },
1812 .active_clk_names = (char*[]){
1813 "bus",
1814 "mem",
1815 "gpll0_mss",
1816 "mnoc_axi",
1817 NULL
1818 },
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001819 .active_pd_names = (char*[]){
1820 "load_state",
1821 NULL
1822 },
Rajendra Nayak4760a892019-01-30 16:39:30 -08001823 .proxy_pd_names = (char*[]){
1824 "cx",
1825 "mx",
1826 "mss",
1827 NULL
1828 },
Sibi Sankar231f67d2018-05-21 22:57:13 +05301829 .need_mem_protection = true,
1830 .has_alt_reset = true,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301831 .has_spare_reg = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301832 .version = MSS_SDM845,
1833};
1834
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001835static const struct rproc_hexagon_res msm8998_mss = {
1836 .hexagon_mba_image = "mba.mbn",
1837 .proxy_clk_names = (char*[]){
1838 "xo",
1839 "qdss",
1840 "mem",
1841 NULL
1842 },
1843 .active_clk_names = (char*[]){
1844 "iface",
1845 "bus",
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001846 "gpll0_mss",
1847 "mnoc_axi",
1848 "snoc_axi",
1849 NULL
1850 },
1851 .proxy_pd_names = (char*[]){
1852 "cx",
1853 "mx",
1854 NULL
1855 },
1856 .need_mem_protection = true,
1857 .has_alt_reset = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301858 .has_spare_reg = false,
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001859 .version = MSS_MSM8998,
1860};
1861
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301862static const struct rproc_hexagon_res msm8996_mss = {
1863 .hexagon_mba_image = "mba.mbn",
Sibi Sankar47b87472018-12-29 00:23:05 +05301864 .proxy_supply = (struct qcom_mss_reg_res[]) {
1865 {
1866 .supply = "pll",
1867 .uA = 100000,
1868 },
1869 {}
1870 },
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301871 .proxy_clk_names = (char*[]){
1872 "xo",
1873 "pnoc",
Sibi Sankar80ec4192018-12-29 00:23:03 +05301874 "qdss",
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301875 NULL
1876 },
1877 .active_clk_names = (char*[]){
1878 "iface",
1879 "bus",
1880 "mem",
Sibi Sankar80ec4192018-12-29 00:23:03 +05301881 "gpll0_mss",
1882 "snoc_axi",
1883 "mnoc_axi",
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301884 NULL
1885 },
1886 .need_mem_protection = true,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301887 .has_alt_reset = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301888 .has_spare_reg = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301889 .version = MSS_MSM8996,
1890};
1891
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301892static const struct rproc_hexagon_res msm8916_mss = {
1893 .hexagon_mba_image = "mba.mbn",
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301894 .proxy_supply = (struct qcom_mss_reg_res[]) {
1895 {
1896 .supply = "mx",
1897 .uV = 1050000,
1898 },
1899 {
1900 .supply = "cx",
1901 .uA = 100000,
1902 },
1903 {
1904 .supply = "pll",
1905 .uA = 100000,
1906 },
1907 {}
1908 },
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301909 .proxy_clk_names = (char*[]){
1910 "xo",
1911 NULL
1912 },
1913 .active_clk_names = (char*[]){
1914 "iface",
1915 "bus",
1916 "mem",
1917 NULL
1918 },
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301919 .need_mem_protection = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301920 .has_alt_reset = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301921 .has_spare_reg = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301922 .version = MSS_MSM8916,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301923};
1924
1925static const struct rproc_hexagon_res msm8974_mss = {
1926 .hexagon_mba_image = "mba.b00",
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301927 .proxy_supply = (struct qcom_mss_reg_res[]) {
1928 {
1929 .supply = "mx",
1930 .uV = 1050000,
1931 },
1932 {
1933 .supply = "cx",
1934 .uA = 100000,
1935 },
1936 {
1937 .supply = "pll",
1938 .uA = 100000,
1939 },
1940 {}
1941 },
1942 .active_supply = (struct qcom_mss_reg_res[]) {
1943 {
1944 .supply = "mss",
1945 .uV = 1050000,
1946 .uA = 100000,
1947 },
1948 {}
1949 },
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301950 .proxy_clk_names = (char*[]){
1951 "xo",
1952 NULL
1953 },
1954 .active_clk_names = (char*[]){
1955 "iface",
1956 "bus",
1957 "mem",
1958 NULL
1959 },
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301960 .need_mem_protection = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301961 .has_alt_reset = false,
Sibi Sankara9fdc792020-04-15 20:21:10 +05301962 .has_spare_reg = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301963 .version = MSS_MSM8974,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301964};
1965
Bjorn Andersson051fb702016-06-20 14:28:41 -07001966static const struct of_device_id q6v5_of_match[] = {
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301967 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1968 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1969 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301970 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001971 { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
Sibi Sankar6439b522019-12-19 11:15:06 +05301972 { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
Sibi Sankar231f67d2018-05-21 22:57:13 +05301973 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
Bjorn Andersson051fb702016-06-20 14:28:41 -07001974 { },
1975};
Javier Martinez Canillas3227c872016-10-18 18:24:19 -03001976MODULE_DEVICE_TABLE(of, q6v5_of_match);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001977
1978static struct platform_driver q6v5_driver = {
1979 .probe = q6v5_probe,
1980 .remove = q6v5_remove,
1981 .driver = {
Bjorn Anderssonef73c222018-09-24 16:45:26 -07001982 .name = "qcom-q6v5-mss",
Bjorn Andersson051fb702016-06-20 14:28:41 -07001983 .of_match_table = q6v5_of_match,
1984 },
1985};
1986module_platform_driver(q6v5_driver);
1987
Bjorn Anderssonef73c222018-09-24 16:45:26 -07001988MODULE_DESCRIPTION("Qualcomm self-authenticating modem remoteproc driver");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001989MODULE_LICENSE("GPL v2");