blob: 899ed769a343b7087ca4eb74ed93a8e7811f14d8 [file] [log] [blame]
Thomas Gleixner1802d0b2019-05-27 08:55:21 +02001// SPDX-License-Identifier: GPL-2.0-only
Bjorn Andersson051fb702016-06-20 14:28:41 -07002/*
Bjorn Anderssonef73c222018-09-24 16:45:26 -07003 * Qualcomm self-authenticating modem subsystem remoteproc driver
Bjorn Andersson051fb702016-06-20 14:28:41 -07004 *
5 * Copyright (C) 2016 Linaro Ltd.
6 * Copyright (C) 2014 Sony Mobile Communications AB
7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Bjorn Andersson051fb702016-06-20 14:28:41 -07008 */
9
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/dma-mapping.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/mfd/syscon.h>
16#include <linux/module.h>
17#include <linux/of_address.h>
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +053018#include <linux/of_device.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070019#include <linux/platform_device.h>
Rajendra Nayak4760a892019-01-30 16:39:30 -080020#include <linux/pm_domain.h>
21#include <linux/pm_runtime.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070022#include <linux/regmap.h>
23#include <linux/regulator/consumer.h>
24#include <linux/remoteproc.h>
25#include <linux/reset.h>
Bjorn Andersson2aad40d2017-01-27 03:12:57 -080026#include <linux/soc/qcom/mdt_loader.h>
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053027#include <linux/iopoll.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070028
29#include "remoteproc_internal.h"
Bjorn Anderssonbde440e2017-01-27 02:28:32 -080030#include "qcom_common.h"
Bjorn Andersson7d674732018-06-04 13:30:38 -070031#include "qcom_q6v5.h"
Bjorn Andersson051fb702016-06-20 14:28:41 -070032
33#include <linux/qcom_scm.h>
34
Bjorn Andersson051fb702016-06-20 14:28:41 -070035#define MPSS_CRASH_REASON_SMEM 421
36
37/* RMB Status Register Values */
38#define RMB_PBL_SUCCESS 0x1
39
40#define RMB_MBA_XPU_UNLOCKED 0x1
41#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
42#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
43#define RMB_MBA_AUTH_COMPLETE 0x4
44
45/* PBL/MBA interface registers */
46#define RMB_MBA_IMAGE_REG 0x00
47#define RMB_PBL_STATUS_REG 0x04
48#define RMB_MBA_COMMAND_REG 0x08
49#define RMB_MBA_STATUS_REG 0x0C
50#define RMB_PMI_META_DATA_REG 0x10
51#define RMB_PMI_CODE_START_REG 0x14
52#define RMB_PMI_CODE_LENGTH_REG 0x18
Sibi Sankar231f67d2018-05-21 22:57:13 +053053#define RMB_MBA_MSS_STATUS 0x40
54#define RMB_MBA_ALT_RESET 0x44
Bjorn Andersson051fb702016-06-20 14:28:41 -070055
56#define RMB_CMD_META_DATA_READY 0x1
57#define RMB_CMD_LOAD_READY 0x2
58
59/* QDSP6SS Register Offsets */
60#define QDSP6SS_RESET_REG 0x014
61#define QDSP6SS_GFMUX_CTL_REG 0x020
62#define QDSP6SS_PWR_CTL_REG 0x030
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053063#define QDSP6SS_MEM_PWR_CTL 0x0B0
Jeffrey Hugo1665cbd2019-10-31 19:45:01 -070064#define QDSP6V6SS_MEM_PWR_CTL 0x034
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053065#define QDSP6SS_STRAP_ACC 0x110
Bjorn Andersson051fb702016-06-20 14:28:41 -070066
67/* AXI Halt Register Offsets */
68#define AXI_HALTREQ_REG 0x0
69#define AXI_HALTACK_REG 0x4
70#define AXI_IDLE_REG 0x8
Sibi Sankar6439b522019-12-19 11:15:06 +053071#define NAV_AXI_HALTREQ_BIT BIT(0)
72#define NAV_AXI_HALTACK_BIT BIT(1)
73#define NAV_AXI_IDLE_BIT BIT(2)
Bjorn Andersson051fb702016-06-20 14:28:41 -070074
75#define HALT_ACK_TIMEOUT_MS 100
76
77/* QDSP6SS_RESET */
78#define Q6SS_STOP_CORE BIT(0)
79#define Q6SS_CORE_ARES BIT(1)
80#define Q6SS_BUS_ARES_ENABLE BIT(2)
81
Sibi Sankar7e0f8682020-01-17 19:21:28 +053082/* QDSP6SS CBCR */
83#define Q6SS_CBCR_CLKEN BIT(0)
84#define Q6SS_CBCR_CLKOFF BIT(31)
85#define Q6SS_CBCR_TIMEOUT_US 200
86
Bjorn Andersson051fb702016-06-20 14:28:41 -070087/* QDSP6SS_GFMUX_CTL */
88#define Q6SS_CLK_ENABLE BIT(1)
89
90/* QDSP6SS_PWR_CTL */
91#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
92#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
93#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
94#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
95#define Q6SS_ETB_SLP_NRET_N BIT(17)
96#define Q6SS_L2DATA_STBY_N BIT(18)
97#define Q6SS_SLP_RET_N BIT(19)
98#define Q6SS_CLAMP_IO BIT(20)
99#define QDSS_BHS_ON BIT(21)
100#define QDSS_LDO_BYP BIT(22)
101
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +0530102/* QDSP6v56 parameters */
103#define QDSP6v56_LDO_BYP BIT(25)
104#define QDSP6v56_BHS_ON BIT(24)
105#define QDSP6v56_CLAMP_WL BIT(21)
106#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +0530107#define QDSP6SS_XO_CBCR 0x0038
108#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
109
Sibi Sankar231f67d2018-05-21 22:57:13 +0530110/* QDSP6v65 parameters */
Sibi Sankar6439b522019-12-19 11:15:06 +0530111#define QDSP6SS_CORE_CBCR 0x20
Sibi Sankar231f67d2018-05-21 22:57:13 +0530112#define QDSP6SS_SLEEP 0x3C
113#define QDSP6SS_BOOT_CORE_START 0x400
114#define QDSP6SS_BOOT_CMD 0x404
Sibi Sankar6439b522019-12-19 11:15:06 +0530115#define QDSP6SS_BOOT_STATUS 0x408
Sibi Sankar0c2caf72020-01-17 19:21:29 +0530116#define BOOT_STATUS_TIMEOUT_US 200
Sibi Sankar231f67d2018-05-21 22:57:13 +0530117#define BOOT_FSM_TIMEOUT 10000
118
/* Runtime state for one regulator supply: handle plus the requested votes. */
struct reg_info {
	struct regulator *reg;	/* handle from devm_regulator_get() */
	int uV;			/* voltage to request; <= 0 means "leave unset" */
	int uA;			/* load to request; <= 0 means "leave unset" */
};
124
/*
 * Static description of one supply in the per-SoC match data; a NULL
 * ->supply entry terminates the table (see q6v5_regulator_init()).
 */
struct qcom_mss_reg_res {
	const char *supply;	/* regulator name to look up */
	int uV;			/* voltage vote, <= 0 means none */
	int uA;			/* load vote, <= 0 means none */
};
130
/*
 * Per-SoC match data: default firmware name, supply/clock/power-domain
 * tables, and the version/feature flags that select the boot sequence.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;		/* default MBA firmware name */
	struct qcom_mss_reg_res *proxy_supply;	/* "proxy" supplies */
	struct qcom_mss_reg_res *active_supply;	/* "active" supplies */
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;			/* one of the MSS_* enum values */
	bool need_mem_protection;	/* route memory through SCM assign calls */
	bool has_alt_reset;		/* alt-reset sequence (see q6v5_reset_*) */
	bool has_halt_nav;		/* NAV halt/conn-box registers present */
};
145
/* Driver state for one Q6V5 modem subsystem instance. */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register block */
	void __iomem *rmb_base;		/* RMB (PBL/MBA mailbox) registers */

	struct regmap *halt_map;	/* AXI halt registers */
	struct regmap *halt_nav_map;	/* NAV halt register (has_halt_nav) */
	struct regmap *conn_map;	/* conn-box register (has_halt_nav) */

	/* offsets into the regmaps above */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_nav;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;		/* common q6v5 state/IRQ handling */

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;			/* modem firmware is up */

	bool dump_mba_loaded;		/* MBA kept loaded for coredump */
	/* bitmasks tracking which segments were dumped vs. expected */
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA (modem boot authenticator) carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* MPSS (modem firmware) carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
	/* current SCM ownership bitmaps of the two carveouts */
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;	/* name of the modem mdt firmware */
	int version;			/* MSS_* enum value */
};
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530211
/* Q6 subsystem generations; selects the boot sequence in q6v5proc_reset(). */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};
220
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530221static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
222 const struct qcom_mss_reg_res *reg_res)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700223{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530224 int rc;
225 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700226
Bjorn Andersson2bb5d902017-01-30 03:20:27 -0800227 if (!reg_res)
228 return 0;
229
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530230 for (i = 0; reg_res[i].supply; i++) {
231 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
232 if (IS_ERR(regs[i].reg)) {
233 rc = PTR_ERR(regs[i].reg);
234 if (rc != -EPROBE_DEFER)
235 dev_err(dev, "Failed to get %s\n regulator",
236 reg_res[i].supply);
237 return rc;
238 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700239
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530240 regs[i].uV = reg_res[i].uV;
241 regs[i].uA = reg_res[i].uA;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700242 }
243
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530244 return i;
245}
246
247static int q6v5_regulator_enable(struct q6v5 *qproc,
248 struct reg_info *regs, int count)
249{
250 int ret;
251 int i;
252
253 for (i = 0; i < count; i++) {
254 if (regs[i].uV > 0) {
255 ret = regulator_set_voltage(regs[i].reg,
256 regs[i].uV, INT_MAX);
257 if (ret) {
258 dev_err(qproc->dev,
259 "Failed to request voltage for %d.\n",
260 i);
261 goto err;
262 }
263 }
264
265 if (regs[i].uA > 0) {
266 ret = regulator_set_load(regs[i].reg,
267 regs[i].uA);
268 if (ret < 0) {
269 dev_err(qproc->dev,
270 "Failed to set regulator mode\n");
271 goto err;
272 }
273 }
274
275 ret = regulator_enable(regs[i].reg);
276 if (ret) {
277 dev_err(qproc->dev, "Regulator enable failed\n");
278 goto err;
279 }
280 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700281
282 return 0;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530283err:
284 for (; i >= 0; i--) {
285 if (regs[i].uV > 0)
286 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
287
288 if (regs[i].uA > 0)
289 regulator_set_load(regs[i].reg, 0);
290
291 regulator_disable(regs[i].reg);
292 }
293
294 return ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700295}
296
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530297static void q6v5_regulator_disable(struct q6v5 *qproc,
298 struct reg_info *regs, int count)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700299{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530300 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700301
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530302 for (i = 0; i < count; i++) {
303 if (regs[i].uV > 0)
304 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700305
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530306 if (regs[i].uA > 0)
307 regulator_set_load(regs[i].reg, 0);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700308
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530309 regulator_disable(regs[i].reg);
310 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700311}
312
/*
 * Prepare and enable @count clocks from @clks in order. On failure every
 * clock enabled so far is disabled again.
 *
 * Returns 0 on success or the failing clk_prepare_enable() errno.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int idx = 0;
	int rc;

	while (idx < count) {
		rc = clk_prepare_enable(clks[idx]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			/* roll back the clocks already running */
			while (--idx >= 0)
				clk_disable_unprepare(clks[idx]);
			return rc;
		}
		idx++;
	}

	return 0;
}
334
/* Disable and unprepare the @count clocks in @clks, in order. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		clk_disable_unprepare(clks[idx]);
}
343
Rajendra Nayak4760a892019-01-30 16:39:30 -0800344static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
345 size_t pd_count)
346{
347 int ret;
348 int i;
349
350 for (i = 0; i < pd_count; i++) {
351 dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
352 ret = pm_runtime_get_sync(pds[i]);
353 if (ret < 0)
354 goto unroll_pd_votes;
355 }
356
357 return 0;
358
359unroll_pd_votes:
360 for (i--; i >= 0; i--) {
361 dev_pm_genpd_set_performance_state(pds[i], 0);
362 pm_runtime_put(pds[i]);
363 }
364
365 return ret;
366};
367
368static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
369 size_t pd_count)
370{
371 int i;
372
373 for (i = 0; i < pd_count; i++) {
374 dev_pm_genpd_set_performance_state(pds[i], 0);
375 pm_runtime_put(pds[i]);
376 }
377}
378
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530379static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
380 bool remote_owner, phys_addr_t addr,
381 size_t size)
382{
383 struct qcom_scm_vmperm next;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530384
385 if (!qproc->need_mem_protection)
386 return 0;
387 if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
388 return 0;
389 if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
390 return 0;
391
392 next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
393 next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
394
Bjorn Andersson9f2a4342017-11-06 22:26:41 -0800395 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
396 current_perm, &next, 1);
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530397}
398
Bjorn Andersson051fb702016-06-20 14:28:41 -0700399static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
400{
401 struct q6v5 *qproc = rproc->priv;
402
403 memcpy(qproc->mba_region, fw->data, fw->size);
404
405 return 0;
406}
407
/*
 * Put the modem subsystem into reset, using the variant-specific sequence:
 * alt-reset parts pulse mss_restart under an asserted PDC reset; halt-nav
 * parts additionally toggle the CONN_BOX spare bit and clear the NAV halt
 * request around the restart (software workaround for a pipeline glitch);
 * all other parts simply assert mss_restart.
 *
 * Returns the result of the final reset_control call.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		/* SWAR using CONN_BOX_SPARE_0 for pipeline glitch issue */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   BIT(0), BIT(0));
		regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
				   NAV_AXI_HALTREQ_BIT, 0);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   BIT(0), 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}
434
/*
 * Release the modem subsystem from reset. On alt-reset parts the ALT_RESET
 * RMB register is raised around a mss_restart pulse (with PDC reset held);
 * halt-nav parts pulse mss_restart; everything else simply deasserts it.
 *
 * Returns the result of the mss_restart reset_control call.
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}
453
Bjorn Andersson051fb702016-06-20 14:28:41 -0700454static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
455{
456 unsigned long timeout;
457 s32 val;
458
459 timeout = jiffies + msecs_to_jiffies(ms);
460 for (;;) {
461 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
462 if (val)
463 break;
464
465 if (time_after(jiffies, timeout))
466 return -ETIMEDOUT;
467
468 msleep(1);
469 }
470
471 return val;
472}
473
474static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
475{
476
477 unsigned long timeout;
478 s32 val;
479
480 timeout = jiffies + msecs_to_jiffies(ms);
481 for (;;) {
482 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
483 if (val < 0)
484 break;
485
486 if (!status && val)
487 break;
488 else if (status && val == status)
489 break;
490
491 if (time_after(jiffies, timeout))
492 return -ETIMEDOUT;
493
494 msleep(1);
495 }
496
497 return val;
498}
499
/*
 * Take the Q6 subsystem out of reset and start it executing, using the
 * version-specific sequence: SDM845 and SC7180 hand power-up to the
 * hardware boot FSM after enabling the needed clock branches, while the
 * older parts (MSM8996/8998 and the final else-branch variants) sequence
 * headswitches, LDO bypass, memory banks and clamps by hand. All paths end
 * by waiting for the PBL to report its boot status over the RMB.
 *
 * Returns 0 when PBL reports RMB_PBL_SUCCESS, a negative errno otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the sleep clock branch and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* FSM completion is reported via the RMB MSS status register */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the sleep clock branch and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
					 val, (val & BIT(0)) != 0, 1,
					 BOOT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
714
715static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
716 struct regmap *halt_map,
717 u32 offset)
718{
719 unsigned long timeout;
720 unsigned int val;
721 int ret;
722
723 /* Check if we're already idle */
724 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
725 if (!ret && val)
726 return;
727
728 /* Assert halt request */
729 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
730
731 /* Wait for halt */
732 timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
733 for (;;) {
734 ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
735 if (ret || val || time_after(jiffies, timeout))
736 break;
737
738 msleep(1);
739 }
740
741 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
742 if (ret || !val)
743 dev_err(qproc->dev, "port failed halt\n");
744
745 /* Clear halt request (port will remain halted until reset) */
746 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
747}
748
/*
 * Halt the NAV AXI port (SC7180), which packs the request, ack and idle
 * flags as bits of a single register at @offset: assert the halt-request
 * bit, busy-wait (5us steps, up to HALT_ACK_TIMEOUT_MS) for the ack bit,
 * then verify the idle bit. Unlike q6v5proc_halt_axi_port() the request
 * bit is left asserted here. A failed halt is only logged.
 */
static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
				       struct regmap *halt_map,
				       u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset, &val);
	if (!ret && (val & NAV_AXI_IDLE_BIT))
		return;

	/* Assert halt request */
	regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
			   NAV_AXI_HALTREQ_BIT);

	/* Wait for halt ack */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset, &val);
		if (ret || (val & NAV_AXI_HALTACK_BIT) ||
		    time_after(jiffies, timeout))
			break;

		udelay(5);
	}

	ret = regmap_read(halt_map, offset, &val);
	if (ret || !(val & NAV_AXI_IDLE_BIT))
		dev_err(qproc->dev, "port failed halt\n");
}
781
/*
 * Feed the MPSS firmware metadata (mdt headers + hashes) to the MBA for
 * authentication: copy it into a DMA-contiguous buffer, hand the buffer to
 * the modem via an SCM ownership transfer, announce it through the RMB
 * registers and wait for the MBA to report authentication success. The
 * buffer is reclaimed for HLOS and freed on all paths.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	/* Extract the metadata segments from the firmware image */
	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	/* The modem needs a physically contiguous view of the metadata */
	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and that it is ready */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}
837
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800838static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
839{
840 if (phdr->p_type != PT_LOAD)
841 return false;
842
843 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
844 return false;
845
846 if (!phdr->p_memsz)
847 return false;
848
849 return true;
850}
851
/*
 * q6v5_mba_load() - power up the Hexagon and boot the Modem Boot Authenticator
 * @qproc: q6v5 driver context
 *
 * Enables power domains, regulators, clocks and deasserts the reset in the
 * strict order the hardware requires, grants the Q6 access to the MBA image
 * in DDR and waits for the MBA to report XPU-unlocked status through the RMB
 * registers. On any failure the already-acquired resources are released in
 * reverse order via the goto ladder below.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the RMB where the MBA image lives, then release the core */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/*
	 * Wait up to 5s for the MBA to come up; any status other than the
	 * two XPU-unlocked values is treated as a boot failure.
	 */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	/* Take the MBA region back from the Q6 before powering down */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
984
/*
 * q6v5_mba_reclaim() - halt the Q6, reclaim memory and release resources
 * @qproc: q6v5 driver context
 *
 * Counterpart of q6v5_mba_load(): halts the AXI ports, returns the mpss and
 * mba carveouts to HLOS ownership and tears down clocks, regulators and
 * power domains acquired for the boot.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	/* Return the modem DDR carveout to HLOS */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
				      false, qproc->mpss_phys,
				      qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		/*
		 * NOTE(review): a non-zero return from qcom_q6v5_unprepare()
		 * appears to mean the proxy votes were never handed over (see
		 * qcom_msa_handover()), so drop them here — confirm against
		 * qcom_q6v5.c.
		 */
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
1041
/*
 * q6v5_mpss_load() - load and authenticate the modem (MPSS) firmware
 * @qproc: q6v5 driver context
 *
 * Reads the mdt header image, copies every loadable segment into the mpss
 * carveout (pulling split ".bNN" files when the mdt does not embed the
 * segment data), hands the carveout to the Q6 and asks the MBA to
 * authenticate it via the RMB registers.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	/* Need at least room to rewrite a 3-char extension below */
	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	/* Local mutable copy: the extension is overwritten per segment */
	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the load-address range and relocation need */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero-fill any bss-style tail of the segment */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	/* Program the image location/size and ask the MBA to authenticate */
	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	return ret < 0 ? ret : 0;
}
1180
Sibi Sankar7dd8ade22018-10-17 19:25:26 +05301181static void qcom_q6v5_dump_segment(struct rproc *rproc,
1182 struct rproc_dump_segment *segment,
1183 void *dest)
1184{
1185 int ret = 0;
1186 struct q6v5 *qproc = rproc->priv;
1187 unsigned long mask = BIT((unsigned long)segment->priv);
1188 void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
1189
1190 /* Unlock mba before copying segments */
1191 if (!qproc->dump_mba_loaded)
1192 ret = q6v5_mba_load(qproc);
1193
1194 if (!ptr || ret)
1195 memset(dest, 0xff, segment->size);
1196 else
1197 memcpy(dest, ptr, segment->size);
1198
1199 qproc->dump_segment_mask |= mask;
1200
1201 /* Reclaim mba after copying segments */
1202 if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
1203 if (qproc->dump_mba_loaded)
1204 q6v5_mba_reclaim(qproc);
1205 }
1206}
1207
/*
 * q6v5_start() - remoteproc .start op: boot the MBA then the modem firmware
 * @rproc: remoteproc instance
 *
 * Boots the MBA, loads and authenticates the MPSS image and waits for the
 * modem to signal it has started. On success the MBA region is returned to
 * HLOS; on failure both carveouts are reclaimed and everything is torn down.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Modem is running; the MBA buffer can go back to HLOS */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);
	q6v5_mba_reclaim(qproc);

	return ret;
}
1252
1253static int q6v5_stop(struct rproc *rproc)
1254{
1255 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1256 int ret;
1257
1258 qproc->running = false;
1259
Bjorn Andersson7d674732018-06-04 13:30:38 -07001260 ret = qcom_q6v5_request_stop(&qproc->q6v5);
1261 if (ret == -ETIMEDOUT)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001262 dev_err(qproc->dev, "timed out on wait\n");
1263
Sibi Sankar03045302018-10-17 19:25:25 +05301264 q6v5_mba_reclaim(qproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001265
1266 return 0;
1267}
1268
1269static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
1270{
1271 struct q6v5 *qproc = rproc->priv;
1272 int offset;
1273
1274 offset = da - qproc->mpss_reloc;
1275 if (offset < 0 || offset + len > qproc->mpss_size)
1276 return NULL;
1277
1278 return qproc->mpss_region + offset;
1279}
1280
/*
 * qcom_q6v5_register_dump_segments() - remoteproc .parse_fw op
 * @rproc: remoteproc instance
 * @mba_fw: MBA firmware passed by the core (unused; the mdt image is read
 *          instead)
 *
 * Walks the program headers of the mdt image and registers one custom
 * coredump segment per loadable header, recording the set of expected
 * segments in dump_complete_mask so qcom_q6v5_dump_segment() knows when
 * the dump is finished.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		/* The phdr index is smuggled through the priv pointer */
		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}
1322
/*
 * remoteproc operations; .parse_fw is used to register coredump segments
 * from the mdt image before the MBA firmware is loaded.
 */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};
1330
Bjorn Andersson7d674732018-06-04 13:30:38 -07001331static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001332{
Bjorn Andersson7d674732018-06-04 13:30:38 -07001333 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
Sibi Sankar663e9842018-05-21 22:57:09 +05301334
1335 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1336 qproc->proxy_clk_count);
1337 q6v5_regulator_disable(qproc, qproc->proxy_regs,
1338 qproc->proxy_reg_count);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001339 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001340}
1341
/*
 * q6v5_init_mem() - map MMIO regions and resolve the halt register handles
 * @qproc: q6v5 driver context
 * @pdev:  backing platform device
 *
 * Maps the "qdsp6" and "rmb" register banks and resolves the syscon/regmap
 * offsets used to halt the AXI ports ("qcom,halt-regs" and, on SoCs with a
 * NAV block, "qcom,halt-nav-regs").
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* "qcom,halt-regs" = <&syscon q6_off modem_off nc_off> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_halt_nav) {
		struct platform_device *nav_pdev;

		/* First entry: regmap of the NAV clock device */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		nav_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (!nav_pdev) {
			/* Provider not probed yet; retry later */
			dev_err(&pdev->dev, "failed to get mss clock device\n");
			return -EPROBE_DEFER;
		}

		qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
		if (!qproc->halt_nav_map) {
			dev_err(&pdev->dev, "failed to get map from device\n");
			return -EINVAL;
		}
		qproc->halt_nav = args.args[0];

		/* Second entry: syscon for the conn box */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}
1417
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301418static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1419 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001420{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301421 int i;
1422
1423 if (!clk_names)
1424 return 0;
1425
1426 for (i = 0; clk_names[i]; i++) {
1427 clks[i] = devm_clk_get(dev, clk_names[i]);
1428 if (IS_ERR(clks[i])) {
1429 int rc = PTR_ERR(clks[i]);
1430
1431 if (rc != -EPROBE_DEFER)
1432 dev_err(dev, "Failed to get %s clock\n",
1433 clk_names[i]);
1434 return rc;
1435 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001436 }
1437
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301438 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001439}
1440
Rajendra Nayak4760a892019-01-30 16:39:30 -08001441static int q6v5_pds_attach(struct device *dev, struct device **devs,
1442 char **pd_names)
1443{
1444 size_t num_pds = 0;
1445 int ret;
1446 int i;
1447
1448 if (!pd_names)
1449 return 0;
1450
1451 while (pd_names[num_pds])
1452 num_pds++;
1453
1454 for (i = 0; i < num_pds; i++) {
1455 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
Sibi Sankarf2583fd2019-08-21 23:35:48 +05301456 if (IS_ERR_OR_NULL(devs[i])) {
1457 ret = PTR_ERR(devs[i]) ? : -ENODATA;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001458 goto unroll_attach;
1459 }
1460 }
1461
1462 return num_pds;
1463
1464unroll_attach:
1465 for (i--; i >= 0; i--)
1466 dev_pm_domain_detach(devs[i], false);
1467
1468 return ret;
1469};
1470
1471static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1472 size_t pd_count)
1473{
1474 int i;
1475
1476 for (i = 0; i < pd_count; i++)
1477 dev_pm_domain_detach(pds[i], false);
1478}
1479
Bjorn Andersson051fb702016-06-20 14:28:41 -07001480static int q6v5_init_reset(struct q6v5 *qproc)
1481{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001482 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
Sibi Sankar9e483ef2018-08-30 00:42:14 +05301483 "mss_restart");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001484 if (IS_ERR(qproc->mss_restart)) {
1485 dev_err(qproc->dev, "failed to acquire mss restart\n");
1486 return PTR_ERR(qproc->mss_restart);
1487 }
1488
Sibi Sankar6439b522019-12-19 11:15:06 +05301489 if (qproc->has_alt_reset || qproc->has_halt_nav) {
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301490 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1491 "pdc_reset");
1492 if (IS_ERR(qproc->pdc_reset)) {
1493 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1494 return PTR_ERR(qproc->pdc_reset);
1495 }
1496 }
1497
Bjorn Andersson051fb702016-06-20 14:28:41 -07001498 return 0;
1499}
1500
Bjorn Andersson051fb702016-06-20 14:28:41 -07001501static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1502{
1503 struct device_node *child;
1504 struct device_node *node;
1505 struct resource r;
1506 int ret;
1507
1508 child = of_get_child_by_name(qproc->dev->of_node, "mba");
1509 node = of_parse_phandle(child, "memory-region", 0);
1510 ret = of_address_to_resource(node, 0, &r);
1511 if (ret) {
1512 dev_err(qproc->dev, "unable to resolve mba region\n");
1513 return ret;
1514 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001515 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001516
1517 qproc->mba_phys = r.start;
1518 qproc->mba_size = resource_size(&r);
1519 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1520 if (!qproc->mba_region) {
1521 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1522 &r.start, qproc->mba_size);
1523 return -EBUSY;
1524 }
1525
1526 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1527 node = of_parse_phandle(child, "memory-region", 0);
1528 ret = of_address_to_resource(node, 0, &r);
1529 if (ret) {
1530 dev_err(qproc->dev, "unable to resolve mpss region\n");
1531 return ret;
1532 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001533 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001534
1535 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1536 qproc->mpss_size = resource_size(&r);
1537 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1538 if (!qproc->mpss_region) {
1539 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1540 &r.start, qproc->mpss_size);
1541 return -EBUSY;
1542 }
1543
1544 return 0;
1545}
1546
1547static int q6v5_probe(struct platform_device *pdev)
1548{
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301549 const struct rproc_hexagon_res *desc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001550 struct q6v5 *qproc;
1551 struct rproc *rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301552 const char *mba_image;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001553 int ret;
1554
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301555 desc = of_device_get_match_data(&pdev->dev);
1556 if (!desc)
1557 return -EINVAL;
1558
Brian Norrisbbcda302018-10-08 19:08:05 -07001559 if (desc->need_mem_protection && !qcom_scm_is_available())
1560 return -EPROBE_DEFER;
1561
Sibi Sankara5a4e022019-01-15 01:20:01 +05301562 mba_image = desc->hexagon_mba_image;
1563 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1564 0, &mba_image);
1565 if (ret < 0 && ret != -EINVAL)
1566 return ret;
1567
Bjorn Andersson051fb702016-06-20 14:28:41 -07001568 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
Sibi Sankara5a4e022019-01-15 01:20:01 +05301569 mba_image, sizeof(*qproc));
Bjorn Andersson051fb702016-06-20 14:28:41 -07001570 if (!rproc) {
1571 dev_err(&pdev->dev, "failed to allocate rproc\n");
1572 return -ENOMEM;
1573 }
1574
Ramon Fried41071022018-05-24 22:21:41 +03001575 rproc->auto_boot = false;
1576
Bjorn Andersson051fb702016-06-20 14:28:41 -07001577 qproc = (struct q6v5 *)rproc->priv;
1578 qproc->dev = &pdev->dev;
1579 qproc->rproc = rproc;
Sibi Sankara5a4e022019-01-15 01:20:01 +05301580 qproc->hexagon_mdt_image = "modem.mdt";
1581 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1582 1, &qproc->hexagon_mdt_image);
1583 if (ret < 0 && ret != -EINVAL)
1584 return ret;
1585
Bjorn Andersson051fb702016-06-20 14:28:41 -07001586 platform_set_drvdata(pdev, qproc);
1587
Sibi Sankar6439b522019-12-19 11:15:06 +05301588 qproc->has_halt_nav = desc->has_halt_nav;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001589 ret = q6v5_init_mem(qproc, pdev);
1590 if (ret)
1591 goto free_rproc;
1592
1593 ret = q6v5_alloc_memory_region(qproc);
1594 if (ret)
1595 goto free_rproc;
1596
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301597 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1598 desc->proxy_clk_names);
1599 if (ret < 0) {
1600 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001601 goto free_rproc;
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301602 }
1603 qproc->proxy_clk_count = ret;
1604
Sibi Sankar231f67d2018-05-21 22:57:13 +05301605 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1606 desc->reset_clk_names);
1607 if (ret < 0) {
1608 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1609 goto free_rproc;
1610 }
1611 qproc->reset_clk_count = ret;
1612
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301613 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1614 desc->active_clk_names);
1615 if (ret < 0) {
1616 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1617 goto free_rproc;
1618 }
1619 qproc->active_clk_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001620
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301621 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1622 desc->proxy_supply);
1623 if (ret < 0) {
1624 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001625 goto free_rproc;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301626 }
1627 qproc->proxy_reg_count = ret;
1628
1629 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1630 desc->active_supply);
1631 if (ret < 0) {
1632 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1633 goto free_rproc;
1634 }
1635 qproc->active_reg_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001636
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001637 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1638 desc->active_pd_names);
1639 if (ret < 0) {
1640 dev_err(&pdev->dev, "Failed to attach active power domains\n");
1641 goto free_rproc;
1642 }
1643 qproc->active_pd_count = ret;
1644
Rajendra Nayak4760a892019-01-30 16:39:30 -08001645 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1646 desc->proxy_pd_names);
1647 if (ret < 0) {
1648 dev_err(&pdev->dev, "Failed to init power domains\n");
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001649 goto detach_active_pds;
Rajendra Nayak4760a892019-01-30 16:39:30 -08001650 }
1651 qproc->proxy_pd_count = ret;
1652
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301653 qproc->has_alt_reset = desc->has_alt_reset;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001654 ret = q6v5_init_reset(qproc);
1655 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001656 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001657
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301658 qproc->version = desc->version;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301659 qproc->need_mem_protection = desc->need_mem_protection;
Bjorn Andersson7d674732018-06-04 13:30:38 -07001660
1661 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1662 qcom_msa_handover);
1663 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001664 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001665
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301666 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1667 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
Sibi Sankar47254962018-05-21 22:57:14 +05301668 qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001669 qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
Bjorn Andersson1e140df2017-07-24 22:56:43 -07001670 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07001671 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
Sibi Sankar027045a2019-01-08 15:53:43 +05301672 if (IS_ERR(qproc->sysmon)) {
1673 ret = PTR_ERR(qproc->sysmon);
Rajendra Nayak4760a892019-01-30 16:39:30 -08001674 goto detach_proxy_pds;
Sibi Sankar027045a2019-01-08 15:53:43 +05301675 }
Bjorn Andersson4b489212017-01-29 14:05:50 -08001676
Bjorn Andersson051fb702016-06-20 14:28:41 -07001677 ret = rproc_add(rproc);
1678 if (ret)
Rajendra Nayak4760a892019-01-30 16:39:30 -08001679 goto detach_proxy_pds;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001680
1681 return 0;
1682
Rajendra Nayak4760a892019-01-30 16:39:30 -08001683detach_proxy_pds:
1684 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
Bjorn Anderssondeb9bb82019-01-30 16:39:31 -08001685detach_active_pds:
1686 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001687free_rproc:
Bjorn Andersson433c0e02016-10-02 17:46:38 -07001688 rproc_free(rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001689
1690 return ret;
1691}
1692
/*
 * q6v5_remove() - tear down the modem remoteproc on driver unbind.
 * @pdev: the platform device this driver was bound to
 *
 * Undoes the probe-time setup in reverse order: the rproc is unregistered
 * from the remoteproc core first (so no new start/stop requests arrive),
 * then the subdevices registered in probe are removed, the power domains
 * attached in probe are detached, and finally the rproc allocation itself
 * is released.
 *
 * Return: always 0.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	/* Unregister from the remoteproc core before tearing anything down */
	rproc_del(qproc->rproc);

	/* Remove subdevices in the opposite order of their probe-time add */
	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);

	/* Drop the power domains acquired by q6v5_pds_attach() in probe */
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);

	rproc_free(qproc->rproc);

	return 0;
}
1711
/*
 * Resource description for the SC7180 modem subsystem.
 *
 * NOTE(review): "proxy" resources are presumably only held on the modem's
 * behalf until the MSA handover (see qcom_msa_handover in probe), while
 * "active" resources stay on for the whole modem lifetime — confirm against
 * the clock/PD handling code earlier in this file.
 */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	/* Clocks required only while the reset sequence is carried out */
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		"mss_nav",
		"mss_crypto",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,	/* firmware regions assigned via SCM */
	.has_alt_reset = false,
	.has_halt_nav = true,		/* only SoC here needing NAV halt handling */
	.version = MSS_SC7180,
};
1746
/*
 * Resource description for the SDM845 modem subsystem.
 * Only SoC in this table using the alternate reset sequence
 * (has_alt_reset = true).
 */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	/* Clocks required only while the reset sequence is carried out */
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,	/* firmware regions assigned via SCM */
	.has_alt_reset = true,
	.has_halt_nav = false,
	.version = MSS_SDM845,
};
1781
/*
 * Resource description for the MSM8998 modem subsystem.
 * No proxy/active regulator supplies are listed; power is provided
 * through the "cx"/"mx" proxy power domains instead.
 */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,	/* firmware regions assigned via SCM */
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8998,
};
1808
/*
 * Resource description for the MSM8996 modem subsystem.
 * Unlike the older MSM8916/MSM8974, only the "pll" proxy supply is
 * required as an explicit regulator.
 */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,	/* load request in microamps */
		},
		{}		/* sentinel terminating the supply list */
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,	/* firmware regions assigned via SCM */
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8996,
};
1838
/*
 * Resource description for the MSM8916 modem subsystem.
 * Also used for the legacy "qcom,q6v5-pil" compatible.
 */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,	/* voltage request in microvolts */
		},
		{
			.supply = "cx",
			.uA = 100000,	/* load request in microamps */
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}		/* sentinel terminating the supply list */
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,	/* no SCM-assigned firmware regions */
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8916,
};
1871
/*
 * Resource description for the MSM8974 modem subsystem.
 * Only SoC in this table with an explicit "active" regulator supply
 * ("mss"), and the only one using the split-image "mba.b00" MBA name.
 */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,	/* voltage request in microvolts */
		},
		{
			.supply = "cx",
			.uA = 100000,	/* load request in microamps */
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}		/* sentinel terminating the supply list */
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,	/* no SCM-assigned firmware regions */
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8974,
};
1912
/*
 * Device-tree match table. "qcom,q6v5-pil" maps to the MSM8916 resource
 * set; NOTE(review): presumably kept as a legacy/generic compatible for
 * existing device trees — confirm against the DT binding document.
 */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001924
/* Platform driver glue; probe/remove are defined earlier in this file. */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");