// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm self-authenticating modem subsystem remoteproc driver
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"
#include "qcom_q6v5.h"

#include <linux/qcom_scm.h>

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6V6SS_MEM_PWR_CTL		0x034
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define NAV_AXI_HALTREQ_BIT		BIT(0)
#define NAV_AXI_HALTACK_BIT		BIT(1)
#define NAV_AXI_IDLE_BIT		BIT(2)
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000
#define NAV_HALT_ACK_TIMEOUT_US		200

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define QDSP6SS_BOOT_STATUS		0x408
#define BOOT_STATUS_TIMEOUT_US		200
#define BOOT_FSM_TIMEOUT		10000

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	struct regmap *halt_nav_map;
	struct regmap *conn_map;

	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_nav;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	bool dump_mba_loaded;
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;
	int version;
};

enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};

static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
			   size_t pd_count)
{
	int ret;
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0)
			goto unroll_pd_votes;
	}

	return 0;

unroll_pd_votes:
	for (i--; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
}

static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
			     size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}
}

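/*
 * Reassign ownership of a memory region between Linux (HLOS) and the modem
 * (MSS MSA) via an SCM call. 'local' and 'remote' select which VMs end up in
 * the new permission list; the call is skipped on targets without memory
 * protection or when the current ownership already matches the request.
 */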
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool local, bool remote, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next[2];
	int perms = 0;

	if (!qproc->need_mem_protection)
		return 0;

	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
		return 0;

	if (local) {
		next[perms].vmid = QCOM_SCM_VMID_HLOS;
		next[perms].perm = QCOM_SCM_PERM_RWX;
		perms++;
	}

	if (remote) {
		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[perms].perm = QCOM_SCM_PERM_RW;
		perms++;
	}

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, next, perms);
}

static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

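/*
 * Assert the MSS restart. Depending on the platform this is either a plain
 * reset assert, a pulsed reset sequenced against the PDC reset
 * (has_alt_reset), or a sequence that additionally gates the NAV AXI
 * interface to avoid glitching the AXI valid signal (has_halt_nav).
 */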
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		/*
		 * When the AXI pipeline is reset while the Q6 modem is partly
		 * operational, the AXI valid signal may glitch, leading to
		 * spurious transactions and Q6 hangs. As a workaround, assert
		 * AXI_GATING_VALID_OVERRIDE before triggering the Q6 MSS
		 * reset. Both HALTREQ and AXI_GATING_VALID_OVERRIDE are
		 * withdrawn after the MSS assert, followed by an MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
				   NAV_AXI_HALTREQ_BIT, 0);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}

static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

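/*
 * Poll the PBL status register until it reports a non-zero value or the
 * timeout (in milliseconds) expires. Returns the status value on success,
 * -ETIMEDOUT otherwise.
 */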
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

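/*
 * Bring the Hexagon core out of reset. The power-up sequence differs per
 * QDSP6 generation: SDM845 and SC7180 hand over to a hardware boot FSM,
 * while MSM8996/MSM8998 and older parts require manually sequencing the
 * head switch, LDO, memory arrays and clamps before starting the core. All
 * paths end by waiting for the PBL to report its status over the RMB.
 */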
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
					 val, (val & BIT(0)) != 0, 1,
					 BOOT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

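/*
 * Halt a bus port on the modem AXI fabric: request the halt, wait for the
 * acknowledgment, then drop the request again (the port stays halted until
 * the next reset).
 */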
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
				       struct regmap *halt_map,
				       u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset, &val);
	if (!ret && (val & NAV_AXI_IDLE_BIT))
		return;

	/* Assert halt request */
	regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
			   NAV_AXI_HALTREQ_BIT);

	/* Wait for halt ack */
	regmap_read_poll_timeout(halt_map, offset, val,
				 (val & NAV_AXI_HALTACK_BIT),
				 5, NAV_HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset, &val);
	if (ret || !(val & NAV_AXI_IDLE_BIT))
		dev_err(qproc->dev, "port failed halt\n");
}

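/*
 * Feed the firmware metadata read from the mdt to the MBA for
 * authentication: copy it into a DMA buffer, grant the modem access to that
 * buffer, issue the RMB "metadata ready" command and wait for the
 * authentication result before reclaiming the buffer.
 */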
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}

static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

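/*
 * Power up the subsystem and boot the MBA (modem boot authenticator):
 * enable power domains, regulators and clocks, deassert the MSS reset, hand
 * the MBA image over to the Q6 and run the PBL/MBA boot sequence. On any
 * failure the resources are unwound in reverse order.
 */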
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/*
	 * In case of failure or a coredump scenario where reclaiming MBA
	 * memory could not happen, reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

static int q6v5_reload_mba(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
	if (ret < 0)
		return ret;

	q6v5_load(rproc, fw);
	ret = q6v5_mba_load(qproc);
	release_firmware(fw);

	return ret;
}

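/*
 * Load and authenticate the MPSS firmware. The mdt's program headers are
 * scanned to determine the load region, each segment (from the combined
 * image or the split .bNN files) is copied into the modem region, and the
 * running total of loaded bytes is reported to the MBA through the RMB
 * code-start/length registers so the image can be authenticated as it is
 * loaded. Ownership of the region is finally handed over to the Q6.
 */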
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	u32 code_length;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/*
	 * In case of a modem subsystem restart on secure devices, the modem
	 * memory can be reclaimed only after MBA is loaded. For modem cold
	 * boot this will be a nop
	 */
	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
				qproc->mpss_phys, qproc->mpss_size);

	/* Share ownership between Linux and MSS, during segment loading */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;

		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!code_length) {
			boot_addr = relocate ? qproc->mpss_phys : min_addr;
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (ret < 0) {
			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
				ret);
			goto release_firmware;
		}
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	return ret < 0 ? ret : 0;
}

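/*
 * Coredump callback for a single MPSS segment. If the MBA is no longer
 * loaded it is rebooted first so that the modem memory can be reclaimed for
 * copying, and once the last segment has been collected ownership of the
 * region is handed back to the Q6 and the MBA is shut down again.
 */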
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);

	return ret;
}

static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

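/*
 * Parse the mdt program headers and register one custom coredump segment
 * per loadable segment; dump_complete_mask tracks which segments must be
 * collected before the MBA can be reclaimed.
 */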
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301340static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1341 const struct firmware *mba_fw)
1342{
1343 const struct firmware *fw;
1344 const struct elf32_phdr *phdrs;
1345 const struct elf32_phdr *phdr;
1346 const struct elf32_hdr *ehdr;
1347 struct q6v5 *qproc = rproc->priv;
1348 unsigned long i;
1349 int ret;
1350
Sibi Sankara5a4e022019-01-15 01:20:01 +05301351 ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301352 if (ret < 0) {
Sibi Sankara5a4e022019-01-15 01:20:01 +05301353 dev_err(qproc->dev, "unable to load %s\n",
1354 qproc->hexagon_mdt_image);
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301355 return ret;
1356 }
1357
1358 ehdr = (struct elf32_hdr *)fw->data;
1359 phdrs = (struct elf32_phdr *)(ehdr + 1);
1360 qproc->dump_complete_mask = 0;
1361
1362 for (i = 0; i < ehdr->e_phnum; i++) {
1363 phdr = &phdrs[i];
1364
1365 if (!q6v5_phdr_valid(phdr))
1366 continue;
1367
1368 ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1369 phdr->p_memsz,
1370 qcom_q6v5_dump_segment,
1371 (void *)i);
1372 if (ret)
1373 break;
1374
1375 qproc->dump_complete_mask |= BIT(i);
1376 }
1377
1378 release_firmware(fw);
1379 return ret;
1380}
1381
Bjorn Andersson051fb702016-06-20 14:28:41 -07001382static const struct rproc_ops q6v5_ops = {
1383 .start = q6v5_start,
1384 .stop = q6v5_stop,
1385 .da_to_va = q6v5_da_to_va,
Sibi Sankarf18b7e92018-10-17 19:25:27 +05301386 .parse_fw = qcom_q6v5_register_dump_segments,
Bjorn Andersson0f21f9c2018-01-05 15:58:01 -08001387 .load = q6v5_load,
Bjorn Andersson051fb702016-06-20 14:28:41 -07001388};
1389
Bjorn Andersson7d674732018-06-04 13:30:38 -07001390static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001391{
Bjorn Andersson7d674732018-06-04 13:30:38 -07001392 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
Sibi Sankar663e9842018-05-21 22:57:09 +05301393
1394 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1395 qproc->proxy_clk_count);
1396 q6v5_regulator_disable(qproc, qproc->proxy_regs,
1397 qproc->proxy_reg_count);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001398 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001399}
1400
1401static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1402{
1403 struct of_phandle_args args;
1404 struct resource *res;
1405 int ret;
1406
1407 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1408 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001409 if (IS_ERR(qproc->reg_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001410 return PTR_ERR(qproc->reg_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001411
1412 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1413 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001414 if (IS_ERR(qproc->rmb_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001415 return PTR_ERR(qproc->rmb_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001416
1417 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1418 "qcom,halt-regs", 3, 0, &args);
1419 if (ret < 0) {
1420 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1421 return -EINVAL;
1422 }
1423
1424 qproc->halt_map = syscon_node_to_regmap(args.np);
1425 of_node_put(args.np);
1426 if (IS_ERR(qproc->halt_map))
1427 return PTR_ERR(qproc->halt_map);
1428
1429 qproc->halt_q6 = args.args[0];
1430 qproc->halt_modem = args.args[1];
1431 qproc->halt_nc = args.args[2];
1432
Sibi Sankar6439b522019-12-19 11:15:06 +05301433 if (qproc->has_halt_nav) {
1434 struct platform_device *nav_pdev;
1435
1436 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1437 "qcom,halt-nav-regs",
1438 1, 0, &args);
1439 if (ret < 0) {
1440 dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
1441 return -EINVAL;
1442 }
1443
1444 nav_pdev = of_find_device_by_node(args.np);
1445 of_node_put(args.np);
1446 if (!nav_pdev) {
1447 dev_err(&pdev->dev, "failed to get mss clock device\n");
1448 return -EPROBE_DEFER;
1449 }
1450
1451 qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
1452 if (!qproc->halt_nav_map) {
1453 dev_err(&pdev->dev, "failed to get map from device\n");
1454 return -EINVAL;
1455 }
1456 qproc->halt_nav = args.args[0];
1457
1458 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1459 "qcom,halt-nav-regs",
1460 1, 1, &args);
1461 if (ret < 0) {
1462 dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
1463 return -EINVAL;
1464 }
1465
1466 qproc->conn_map = syscon_node_to_regmap(args.np);
1467 of_node_put(args.np);
1468 if (IS_ERR(qproc->conn_map))
1469 return PTR_ERR(qproc->conn_map);
1470
1471 qproc->conn_box = args.args[0];
1472 }
1473
Bjorn Andersson051fb702016-06-20 14:28:41 -07001474 return 0;
1475}
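
/*
 * Simplified sketch of how the halt offsets parsed above are used on the
 * way down (the real q6v5proc_halt_axi_port(), earlier in this file, also
 * checks AXI_IDLE_REG first and open-codes the poll loop): raise the halt
 * request in the TCSR block, wait for the ack, then drop the request.
 */
static void __maybe_unused q6v5_halt_axi_port_sketch(struct q6v5 *qproc,
						     struct regmap *halt_map,
						     u32 offset)
{
	unsigned int val;
	int ret;

	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	ret = regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG,
				       val, val != 0, 1000,
				       HALT_ACK_TIMEOUT_US);
	if (ret)
		dev_err(qproc->dev, "port failed to halt\n");

	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}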
1476
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301477static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1478 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001479{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301480 int i;
1481
1482 if (!clk_names)
1483 return 0;
1484
1485 for (i = 0; clk_names[i]; i++) {
1486 clks[i] = devm_clk_get(dev, clk_names[i]);
1487 if (IS_ERR(clks[i])) {
1488 int rc = PTR_ERR(clks[i]);
1489
1490 if (rc != -EPROBE_DEFER)
1491 dev_err(dev, "Failed to get %s clock\n",
1492 clk_names[i]);
1493 return rc;
1494 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001495 }
1496
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301497 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001498}
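
/*
 * Counterpart sketch for the boot path: the enable helper (the real
 * q6v5_clk_enable() is defined earlier in this file) prepares and enables
 * the same NULL-terminated set and unwinds on failure, roughly like this:
 */
static int __maybe_unused q6v5_clk_enable_sketch(struct device *dev,
						 struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}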
1499
Rajendra Nayak4760a892019-01-30 16:39:30 -08001500static int q6v5_pds_attach(struct device *dev, struct device **devs,
1501 char **pd_names)
1502{
1503 size_t num_pds = 0;
1504 int ret;
1505 int i;
1506
1507 if (!pd_names)
1508 return 0;
1509
1510 while (pd_names[num_pds])
1511 num_pds++;
1512
1513 for (i = 0; i < num_pds; i++) {
1514 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
Sibi Sankarf2583fd2019-08-21 23:35:48 +05301515 if (IS_ERR_OR_NULL(devs[i])) {
1516 ret = PTR_ERR(devs[i]) ? : -ENODATA;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001517 goto unroll_attach;
1518 }
1519 }
1520
1521 return num_pds;
1522
1523unroll_attach:
1524 for (i--; i >= 0; i--)
1525 dev_pm_domain_detach(devs[i], false);
1526
1527 return ret;
1528};
1529
1530static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1531 size_t pd_count)
1532{
1533 int i;
1534
1535 for (i = 0; i < pd_count; i++)
1536 dev_pm_domain_detach(pds[i], false);
1537}
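
/*
 * Usage sketch: once attached, each power-domain virtual device is voted
 * on by raising its performance state and taking a runtime PM reference,
 * and released again by doing the reverse. The real q6v5_pds_enable()/
 * q6v5_pds_disable() pair lives earlier in this file; this simplified
 * version ignores the return value of the performance-state call.
 */
static int __maybe_unused q6v5_pds_enable_sketch(struct q6v5 *qproc,
						 struct device **pds,
						 size_t pd_count)
{
	int ret;
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0)
			goto unroll_votes;
	}

	return 0;

unroll_votes:
	for (; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
}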
1538
Bjorn Andersson051fb702016-06-20 14:28:41 -07001539static int q6v5_init_reset(struct q6v5 *qproc)
1540{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001541 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
Sibi Sankar9e483ef2018-08-30 00:42:14 +05301542 "mss_restart");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001543 if (IS_ERR(qproc->mss_restart)) {
1544 dev_err(qproc->dev, "failed to acquire mss restart\n");
1545 return PTR_ERR(qproc->mss_restart);
1546 }
1547
Sibi Sankar6439b522019-12-19 11:15:06 +05301548 if (qproc->has_alt_reset || qproc->has_halt_nav) {
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301549 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1550 "pdc_reset");
1551 if (IS_ERR(qproc->pdc_reset)) {
1552 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1553 return PTR_ERR(qproc->pdc_reset);
1554 }
1555 }
1556
Bjorn Andersson051fb702016-06-20 14:28:41 -07001557 return 0;
1558}
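
/*
 * Sketch of how the line acquired above is exercised: the driver asserts
 * mss_restart when taking the modem down and deasserts it before bringing
 * the Hexagon out of reset (the SDM845/SC7180 variants go through the PDC
 * reset and RMB_MBA_ALT_RESET instead). The helper below is a made-up
 * illustration of a full pulse; the delay value is an assumption.
 */
static int __maybe_unused q6v5_pulse_mss_restart_sketch(struct q6v5 *qproc)
{
	int ret;

	ret = reset_control_assert(qproc->mss_restart);
	if (ret)
		return ret;

	/* give the reset a moment to propagate before releasing it */
	usleep_range(100, 200);

	return reset_control_deassert(qproc->mss_restart);
}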
1559
Bjorn Andersson051fb702016-06-20 14:28:41 -07001560static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1561{
1562 struct device_node *child;
1563 struct device_node *node;
1564 struct resource r;
1565 int ret;
1566
1567 child = of_get_child_by_name(qproc->dev->of_node, "mba");
1568 node = of_parse_phandle(child, "memory-region", 0);
1569 ret = of_address_to_resource(node, 0, &r);
1570 if (ret) {
1571 dev_err(qproc->dev, "unable to resolve mba region\n");
1572 return ret;
1573 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001574 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001575
1576 qproc->mba_phys = r.start;
1577 qproc->mba_size = resource_size(&r);
1578 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1579 if (!qproc->mba_region) {
1580 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1581 &r.start, qproc->mba_size);
1582 return -EBUSY;
1583 }
1584
1585 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1586 node = of_parse_phandle(child, "memory-region", 0);
1587 ret = of_address_to_resource(node, 0, &r);
1588 if (ret) {
1589 dev_err(qproc->dev, "unable to resolve mpss region\n");
1590 return ret;
1591 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001592 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001593
1594 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1595 qproc->mpss_size = resource_size(&r);
1596 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1597 if (!qproc->mpss_region) {
1598 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1599 &r.start, qproc->mpss_size);
1600 return -EBUSY;
1601 }
1602
1603 return 0;
1604}
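
/*
 * For context, the two lookups above expect "mba" and "mpss" child nodes
 * that each point at a reserved-memory carveout, along these lines (node
 * names and phandles are illustrative, not taken from any particular
 * board):
 *
 *	mba {
 *		memory-region = <&mba_mem>;
 *	};
 *
 *	mpss {
 *		memory-region = <&mpss_mem>;
 *	};
 */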
1605
1606static int q6v5_probe(struct platform_device *pdev)
1607{
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301608 const struct rproc_hexagon_res *desc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001609 struct q6v5 *qproc;
1610 struct rproc *rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301611 const char *mba_image;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001612 int ret;
1613
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301614 desc = of_device_get_match_data(&pdev->dev);
1615 if (!desc)
1616 return -EINVAL;
1617
Brian Norrisbbcda302018-10-08 19:08:05 -07001618 if (desc->need_mem_protection && !qcom_scm_is_available())
1619 return -EPROBE_DEFER;
1620
Sibi Sankara5a4e022019-01-15 01:20:01 +05301621 mba_image = desc->hexagon_mba_image;
1622 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1623 0, &mba_image);
1624 if (ret < 0 && ret != -EINVAL)
1625 return ret;
1626
Bjorn Andersson051fb702016-06-20 14:28:41 -07001627 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
Sibi Sankara5a4e022019-01-15 01:20:01 +05301628 mba_image, sizeof(*qproc));
Bjorn Andersson051fb702016-06-20 14:28:41 -07001629 if (!rproc) {
1630 dev_err(&pdev->dev, "failed to allocate rproc\n");
1631 return -ENOMEM;
1632 }
1633
Ramon Fried41071022018-05-24 22:21:41 +03001634 rproc->auto_boot = false;
1635
Bjorn Andersson051fb702016-06-20 14:28:41 -07001636 qproc = (struct q6v5 *)rproc->priv;
1637 qproc->dev = &pdev->dev;
1638 qproc->rproc = rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301639 qproc->hexagon_mdt_image = "modem.mdt";
1640 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1641 1, &qproc->hexagon_mdt_image);
1642 if (ret < 0 && ret != -EINVAL)
1643		goto free_rproc;
1644
Bjorn Andersson051fb702016-06-20 14:28:41 -07001645 platform_set_drvdata(pdev, qproc);
1646
Sibi Sankar6439b522019-12-19 11:15:06 +05301647 qproc->has_halt_nav = desc->has_halt_nav;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001648 ret = q6v5_init_mem(qproc, pdev);
1649 if (ret)
1650 goto free_rproc;
1651
1652 ret = q6v5_alloc_memory_region(qproc);
1653 if (ret)
1654 goto free_rproc;
1655
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301656 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1657 desc->proxy_clk_names);
1658 if (ret < 0) {
1659 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001660 goto free_rproc;
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301661 }
1662 qproc->proxy_clk_count = ret;
1663
Sibi Sankar231f67d2018-05-21 22:57:13 +05301664 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1665 desc->reset_clk_names);
1666 if (ret < 0) {
1667 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1668 goto free_rproc;
1669 }
1670 qproc->reset_clk_count = ret;
1671
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301672 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1673 desc->active_clk_names);
1674 if (ret < 0) {
1675 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1676 goto free_rproc;
1677 }
1678 qproc->active_clk_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001679
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301680 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1681 desc->proxy_supply);
1682 if (ret < 0) {
1683 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001684 goto free_rproc;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301685 }
1686 qproc->proxy_reg_count = ret;
1687
1688 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1689 desc->active_supply);
1690 if (ret < 0) {
1691 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1692 goto free_rproc;
1693 }
1694 qproc->active_reg_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001695
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001696 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1697 desc->active_pd_names);
1698 if (ret < 0) {
1699 dev_err(&pdev->dev, "Failed to attach active power domains\n");
1700 goto free_rproc;
1701 }
1702 qproc->active_pd_count = ret;
1703
Rajendra Nayak4760a892019-01-30 16:39:30 -08001704 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1705 desc->proxy_pd_names);
1706 if (ret < 0) {
1707		dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001708 goto detach_active_pds;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001709 }
1710 qproc->proxy_pd_count = ret;
1711
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301712 qproc->has_alt_reset = desc->has_alt_reset;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001713 ret = q6v5_init_reset(qproc);
1714 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001715 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001716
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301717 qproc->version = desc->version;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301718 qproc->need_mem_protection = desc->need_mem_protection;
Bjorn Andersson7d674732018-06-04 13:30:38 -07001719
1720 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1721 qcom_msa_handover);
1722 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001723 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001724
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301725 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1726 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
Sibi Sankar47254962018-05-21 22:57:14 +05301727 qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001728 qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
Bjorn Andersson1e140df2017-07-24 22:56:43 -07001729 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07001730 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
Sibi Sankar027045a2019-01-08 15:53:43 +05301731 if (IS_ERR(qproc->sysmon)) {
1732 ret = PTR_ERR(qproc->sysmon);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001733 goto detach_proxy_pds;
Sibi Sankar027045a2019-01-08 15:53:43 +05301734 }
Bjorn Andersson4b489212017-01-29 14:05:50 -08001735
Bjorn Andersson051fb702016-06-20 14:28:41 -07001736 ret = rproc_add(rproc);
1737 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001738 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001739
1740 return 0;
1741
Rajendra Nayak4760a892019-01-30 16:39:30 -08001742detach_proxy_pds:
1743 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001744detach_active_pds:
1745 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001746free_rproc:
Bjorn Andersson433c0e02016-10-02 17:46:38 -07001747 rproc_free(rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001748
1749 return ret;
1750}
1751
1752static int q6v5_remove(struct platform_device *pdev)
1753{
1754 struct q6v5 *qproc = platform_get_drvdata(pdev);
1755
1756 rproc_del(qproc->rproc);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001757
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07001758 qcom_remove_sysmon_subdev(qproc->sysmon);
Sibi Sankar47254962018-05-21 22:57:14 +05301759 qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001760 qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
Bjorn Andersson1e140df2017-07-24 22:56:43 -07001761 qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001762
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001763 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001764 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1765
Bjorn Andersson433c0e02016-10-02 17:46:38 -07001766 rproc_free(qproc->rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001767
1768 return 0;
1769}
1770
Sibi Sankar6439b522019-12-19 11:15:06 +05301771static const struct rproc_hexagon_res sc7180_mss = {
1772 .hexagon_mba_image = "mba.mbn",
1773 .proxy_clk_names = (char*[]){
1774 "xo",
1775 NULL
1776 },
1777 .reset_clk_names = (char*[]){
1778 "iface",
1779 "bus",
1780 "snoc_axi",
1781 NULL
1782 },
1783 .active_clk_names = (char*[]){
1784 "mnoc_axi",
1785 "nav",
1786 "mss_nav",
1787 "mss_crypto",
1788 NULL
1789 },
1790 .active_pd_names = (char*[]){
1791 "load_state",
1792 NULL
1793 },
1794 .proxy_pd_names = (char*[]){
1795 "cx",
1796 "mx",
1797 "mss",
1798 NULL
1799 },
1800 .need_mem_protection = true,
1801 .has_alt_reset = false,
1802 .has_halt_nav = true,
1803 .version = MSS_SC7180,
1804};
1805
Sibi Sankar231f67d2018-05-21 22:57:13 +05301806static const struct rproc_hexagon_res sdm845_mss = {
1807 .hexagon_mba_image = "mba.mbn",
1808 .proxy_clk_names = (char*[]){
1809 "xo",
Sibi Sankar231f67d2018-05-21 22:57:13 +05301810 "prng",
1811 NULL
1812 },
1813 .reset_clk_names = (char*[]){
1814 "iface",
1815 "snoc_axi",
1816 NULL
1817 },
1818 .active_clk_names = (char*[]){
1819 "bus",
1820 "mem",
1821 "gpll0_mss",
1822 "mnoc_axi",
1823 NULL
1824 },
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001825 .active_pd_names = (char*[]){
1826 "load_state",
1827 NULL
1828 },
Rajendra Nayak4760a892019-01-30 16:39:30 -08001829 .proxy_pd_names = (char*[]){
1830 "cx",
1831 "mx",
1832 "mss",
1833 NULL
1834 },
Sibi Sankar231f67d2018-05-21 22:57:13 +05301835 .need_mem_protection = true,
1836 .has_alt_reset = true,
Sibi Sankar6439b522019-12-19 11:15:06 +05301837 .has_halt_nav = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301838 .version = MSS_SDM845,
1839};
1840
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001841static const struct rproc_hexagon_res msm8998_mss = {
1842 .hexagon_mba_image = "mba.mbn",
1843 .proxy_clk_names = (char*[]){
1844 "xo",
1845 "qdss",
1846 "mem",
1847 NULL
1848 },
1849 .active_clk_names = (char*[]){
1850 "iface",
1851 "bus",
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001852 "gpll0_mss",
1853 "mnoc_axi",
1854 "snoc_axi",
1855 NULL
1856 },
1857 .proxy_pd_names = (char*[]){
1858 "cx",
1859 "mx",
1860 NULL
1861 },
1862 .need_mem_protection = true,
1863 .has_alt_reset = false,
Sibi Sankar6439b522019-12-19 11:15:06 +05301864 .has_halt_nav = false,
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001865 .version = MSS_MSM8998,
1866};
1867
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301868static const struct rproc_hexagon_res msm8996_mss = {
1869 .hexagon_mba_image = "mba.mbn",
Sibi Sankar47b87472018-12-29 00:23:05 +05301870 .proxy_supply = (struct qcom_mss_reg_res[]) {
1871 {
1872 .supply = "pll",
1873 .uA = 100000,
1874 },
1875 {}
1876 },
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301877 .proxy_clk_names = (char*[]){
1878 "xo",
1879 "pnoc",
Sibi Sankar80ec4192018-12-29 00:23:03 +05301880 "qdss",
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301881 NULL
1882 },
1883 .active_clk_names = (char*[]){
1884 "iface",
1885 "bus",
1886 "mem",
Sibi Sankar80ec4192018-12-29 00:23:03 +05301887 "gpll0_mss",
1888 "snoc_axi",
1889 "mnoc_axi",
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301890 NULL
1891 },
1892 .need_mem_protection = true,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301893 .has_alt_reset = false,
Sibi Sankar6439b522019-12-19 11:15:06 +05301894 .has_halt_nav = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301895 .version = MSS_MSM8996,
1896};
1897
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301898static const struct rproc_hexagon_res msm8916_mss = {
1899 .hexagon_mba_image = "mba.mbn",
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301900 .proxy_supply = (struct qcom_mss_reg_res[]) {
1901 {
1902 .supply = "mx",
1903 .uV = 1050000,
1904 },
1905 {
1906 .supply = "cx",
1907 .uA = 100000,
1908 },
1909 {
1910 .supply = "pll",
1911 .uA = 100000,
1912 },
1913 {}
1914 },
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301915 .proxy_clk_names = (char*[]){
1916 "xo",
1917 NULL
1918 },
1919 .active_clk_names = (char*[]){
1920 "iface",
1921 "bus",
1922 "mem",
1923 NULL
1924 },
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301925 .need_mem_protection = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301926 .has_alt_reset = false,
Sibi Sankar6439b522019-12-19 11:15:06 +05301927 .has_halt_nav = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301928 .version = MSS_MSM8916,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301929};
1930
1931static const struct rproc_hexagon_res msm8974_mss = {
1932 .hexagon_mba_image = "mba.b00",
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301933 .proxy_supply = (struct qcom_mss_reg_res[]) {
1934 {
1935 .supply = "mx",
1936 .uV = 1050000,
1937 },
1938 {
1939 .supply = "cx",
1940 .uA = 100000,
1941 },
1942 {
1943 .supply = "pll",
1944 .uA = 100000,
1945 },
1946 {}
1947 },
1948 .active_supply = (struct qcom_mss_reg_res[]) {
1949 {
1950 .supply = "mss",
1951 .uV = 1050000,
1952 .uA = 100000,
1953 },
1954 {}
1955 },
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301956 .proxy_clk_names = (char*[]){
1957 "xo",
1958 NULL
1959 },
1960 .active_clk_names = (char*[]){
1961 "iface",
1962 "bus",
1963 "mem",
1964 NULL
1965 },
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301966 .need_mem_protection = false,
Sibi Sankar231f67d2018-05-21 22:57:13 +05301967 .has_alt_reset = false,
Sibi Sankar6439b522019-12-19 11:15:06 +05301968 .has_halt_nav = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301969 .version = MSS_MSM8974,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301970};
1971
Bjorn Andersson051fb702016-06-20 14:28:41 -07001972static const struct of_device_id q6v5_of_match[] = {
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301973 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1974 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1975 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301976 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -07001977 { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
Sibi Sankar6439b522019-12-19 11:15:06 +05301978 { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
Sibi Sankar231f67d2018-05-21 22:57:13 +05301979 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
Bjorn Andersson051fb702016-06-20 14:28:41 -07001980 { },
1981};
Javier Martinez Canillas3227c872016-10-18 18:24:19 -03001982MODULE_DEVICE_TABLE(of, q6v5_of_match);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001983
1984static struct platform_driver q6v5_driver = {
1985 .probe = q6v5_probe,
1986 .remove = q6v5_remove,
1987 .driver = {
Bjorn Anderssonef73c222018-09-24 16:45:26 -07001988 .name = "qcom-q6v5-mss",
Bjorn Andersson051fb702016-06-20 14:28:41 -07001989 .of_match_table = q6v5_of_match,
1990 },
1991};
1992module_platform_driver(q6v5_driver);
1993
Bjorn Anderssonef73c222018-09-24 16:45:26 -07001994MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001995MODULE_LICENSE("GPL v2");