blob: c86dc40cfb8cfcb3427cfafd3f271d2fc6a44e45 [file] [log] [blame]
Bjorn Andersson051fb702016-06-20 14:28:41 -07001/*
Bjorn Anderssonef73c222018-09-24 16:45:26 -07002 * Qualcomm self-authenticating modem subsystem remoteproc driver
Bjorn Andersson051fb702016-06-20 14:28:41 -07003 *
4 * Copyright (C) 2016 Linaro Ltd.
5 * Copyright (C) 2014 Sony Mobile Communications AB
6 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/clk.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/interrupt.h>
22#include <linux/kernel.h>
23#include <linux/mfd/syscon.h>
24#include <linux/module.h>
25#include <linux/of_address.h>
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +053026#include <linux/of_device.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070027#include <linux/platform_device.h>
28#include <linux/regmap.h>
29#include <linux/regulator/consumer.h>
30#include <linux/remoteproc.h>
31#include <linux/reset.h>
Bjorn Andersson2aad40d2017-01-27 03:12:57 -080032#include <linux/soc/qcom/mdt_loader.h>
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053033#include <linux/iopoll.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070034
35#include "remoteproc_internal.h"
Bjorn Anderssonbde440e2017-01-27 02:28:32 -080036#include "qcom_common.h"
Bjorn Andersson7d674732018-06-04 13:30:38 -070037#include "qcom_q6v5.h"
Bjorn Andersson051fb702016-06-20 14:28:41 -070038
39#include <linux/qcom_scm.h>
40
Bjorn Andersson051fb702016-06-20 14:28:41 -070041#define MPSS_CRASH_REASON_SMEM 421
42
43/* RMB Status Register Values */
44#define RMB_PBL_SUCCESS 0x1
45
46#define RMB_MBA_XPU_UNLOCKED 0x1
47#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
48#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
49#define RMB_MBA_AUTH_COMPLETE 0x4
50
51/* PBL/MBA interface registers */
52#define RMB_MBA_IMAGE_REG 0x00
53#define RMB_PBL_STATUS_REG 0x04
54#define RMB_MBA_COMMAND_REG 0x08
55#define RMB_MBA_STATUS_REG 0x0C
56#define RMB_PMI_META_DATA_REG 0x10
57#define RMB_PMI_CODE_START_REG 0x14
58#define RMB_PMI_CODE_LENGTH_REG 0x18
Sibi Sankar231f67d2018-05-21 22:57:13 +053059#define RMB_MBA_MSS_STATUS 0x40
60#define RMB_MBA_ALT_RESET 0x44
Bjorn Andersson051fb702016-06-20 14:28:41 -070061
62#define RMB_CMD_META_DATA_READY 0x1
63#define RMB_CMD_LOAD_READY 0x2
64
65/* QDSP6SS Register Offsets */
66#define QDSP6SS_RESET_REG 0x014
67#define QDSP6SS_GFMUX_CTL_REG 0x020
68#define QDSP6SS_PWR_CTL_REG 0x030
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053069#define QDSP6SS_MEM_PWR_CTL 0x0B0
70#define QDSP6SS_STRAP_ACC 0x110
Bjorn Andersson051fb702016-06-20 14:28:41 -070071
72/* AXI Halt Register Offsets */
73#define AXI_HALTREQ_REG 0x0
74#define AXI_HALTACK_REG 0x4
75#define AXI_IDLE_REG 0x8
76
77#define HALT_ACK_TIMEOUT_MS 100
78
79/* QDSP6SS_RESET */
80#define Q6SS_STOP_CORE BIT(0)
81#define Q6SS_CORE_ARES BIT(1)
82#define Q6SS_BUS_ARES_ENABLE BIT(2)
83
84/* QDSP6SS_GFMUX_CTL */
85#define Q6SS_CLK_ENABLE BIT(1)
86
87/* QDSP6SS_PWR_CTL */
88#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
89#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
90#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
91#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
92#define Q6SS_ETB_SLP_NRET_N BIT(17)
93#define Q6SS_L2DATA_STBY_N BIT(18)
94#define Q6SS_SLP_RET_N BIT(19)
95#define Q6SS_CLAMP_IO BIT(20)
96#define QDSS_BHS_ON BIT(21)
97#define QDSS_LDO_BYP BIT(22)
98
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053099/* QDSP6v56 parameters */
100#define QDSP6v56_LDO_BYP BIT(25)
101#define QDSP6v56_BHS_ON BIT(24)
102#define QDSP6v56_CLAMP_WL BIT(21)
103#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
104#define HALT_CHECK_MAX_LOOPS 200
105#define QDSP6SS_XO_CBCR 0x0038
106#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
107
Sibi Sankar231f67d2018-05-21 22:57:13 +0530108/* QDSP6v65 parameters */
109#define QDSP6SS_SLEEP 0x3C
110#define QDSP6SS_BOOT_CORE_START 0x400
111#define QDSP6SS_BOOT_CMD 0x404
112#define SLEEP_CHECK_MAX_LOOPS 200
113#define BOOT_FSM_TIMEOUT 10000
114
/* Runtime state for one regulator supply: handle plus requested levels */
struct reg_info {
	struct regulator *reg;
	int uV;		/* requested voltage; values <= 0 mean "do not set" */
	int uA;		/* requested load; values <= 0 mean "do not set" */
};
120
/* Static per-SoC description of one regulator requirement */
struct qcom_mss_reg_res {
	const char *supply;	/* supply name; a NULL entry terminates the array */
	int uV;			/* voltage to request, <= 0 to skip */
	int uA;			/* load to request, <= 0 to skip */
};
126
/* Per-SoC match data: firmware name, supplies, clocks and feature flags */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;		/* default MBA firmware name */
	struct qcom_mss_reg_res *proxy_supply;	/* NULL-terminated supply tables */
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;			/* NULL-terminated clock name lists */
	char **reset_clk_names;
	char **active_clk_names;
	int version;				/* one of the MSS_* enum values */
	bool need_mem_protection;		/* assign carveouts via SCM calls */
	bool has_alt_reset;			/* use RMB_MBA_ALT_RESET sequence */
};
138
/* Driver state for one Q6V5 modem subsystem instance */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register space */
	void __iomem *rmb_base;		/* RMB (PBL/MBA mailbox) registers */

	/* regmap + per-port offsets for the AXI halt registers */
	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;	/* only used when has_alt_reset */

	struct qcom_q6v5 q6v5;

	/* clock handles populated from the match data name lists */
	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;

	/* regulator handles populated from the match data supply tables */
	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	bool dump_mba_loaded;		/* MBA still resident, for coredump path */
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA firmware carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* modem (MPSS) firmware carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	int mpss_perm;			/* current SCM ownership of mpss region */
	int mba_perm;			/* current SCM ownership of mba region */
	int version;			/* one of the MSS_* enum values */
};
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530193
/* Supported QDSP6 hardware generations, selected via of_device match data */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_SDM845,
};
200
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530201static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
202 const struct qcom_mss_reg_res *reg_res)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700203{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530204 int rc;
205 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700206
Bjorn Andersson2bb5d902017-01-30 03:20:27 -0800207 if (!reg_res)
208 return 0;
209
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530210 for (i = 0; reg_res[i].supply; i++) {
211 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
212 if (IS_ERR(regs[i].reg)) {
213 rc = PTR_ERR(regs[i].reg);
214 if (rc != -EPROBE_DEFER)
215 dev_err(dev, "Failed to get %s\n regulator",
216 reg_res[i].supply);
217 return rc;
218 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700219
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530220 regs[i].uV = reg_res[i].uV;
221 regs[i].uA = reg_res[i].uA;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700222 }
223
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530224 return i;
225}
226
227static int q6v5_regulator_enable(struct q6v5 *qproc,
228 struct reg_info *regs, int count)
229{
230 int ret;
231 int i;
232
233 for (i = 0; i < count; i++) {
234 if (regs[i].uV > 0) {
235 ret = regulator_set_voltage(regs[i].reg,
236 regs[i].uV, INT_MAX);
237 if (ret) {
238 dev_err(qproc->dev,
239 "Failed to request voltage for %d.\n",
240 i);
241 goto err;
242 }
243 }
244
245 if (regs[i].uA > 0) {
246 ret = regulator_set_load(regs[i].reg,
247 regs[i].uA);
248 if (ret < 0) {
249 dev_err(qproc->dev,
250 "Failed to set regulator mode\n");
251 goto err;
252 }
253 }
254
255 ret = regulator_enable(regs[i].reg);
256 if (ret) {
257 dev_err(qproc->dev, "Regulator enable failed\n");
258 goto err;
259 }
260 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700261
262 return 0;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530263err:
264 for (; i >= 0; i--) {
265 if (regs[i].uV > 0)
266 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
267
268 if (regs[i].uA > 0)
269 regulator_set_load(regs[i].reg, 0);
270
271 regulator_disable(regs[i].reg);
272 }
273
274 return ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700275}
276
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530277static void q6v5_regulator_disable(struct q6v5 *qproc,
278 struct reg_info *regs, int count)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700279{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530280 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700281
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530282 for (i = 0; i < count; i++) {
283 if (regs[i].uV > 0)
284 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700285
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530286 if (regs[i].uA > 0)
287 regulator_set_load(regs[i].reg, 0);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700288
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530289 regulator_disable(regs[i].reg);
290 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700291}
292
/*
 * Prepare and enable @count clocks from @clks.  On failure, every clock
 * enabled so far is disabled again in reverse order.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc)
			goto unwind;
	}

	return 0;

unwind:
	dev_err(dev, "Clock enable failed\n");
	while (i > 0)
		clk_disable_unprepare(clks[--i]);

	return rc;
}
314
/* Disable and unprepare @count clocks from @clks, in order */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int idx;

	for (idx = 0; idx != count; ++idx)
		clk_disable_unprepare(clks[idx]);
}
323
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530324static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
325 bool remote_owner, phys_addr_t addr,
326 size_t size)
327{
328 struct qcom_scm_vmperm next;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530329
330 if (!qproc->need_mem_protection)
331 return 0;
332 if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
333 return 0;
334 if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
335 return 0;
336
337 next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
338 next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
339
Bjorn Andersson9f2a4342017-11-06 22:26:41 -0800340 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
341 current_perm, &next, 1);
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530342}
343
Bjorn Andersson051fb702016-06-20 14:28:41 -0700344static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
345{
346 struct q6v5 *qproc = rproc->priv;
347
348 memcpy(qproc->mba_region, fw->data, fw->size);
349
350 return 0;
351}
352
Sibi Sankar9f135fa2018-05-21 22:57:12 +0530353static int q6v5_reset_assert(struct q6v5 *qproc)
354{
Sibi Sankar29a5f9a2018-08-30 00:42:15 +0530355 int ret;
356
357 if (qproc->has_alt_reset) {
358 reset_control_assert(qproc->pdc_reset);
359 ret = reset_control_reset(qproc->mss_restart);
360 reset_control_deassert(qproc->pdc_reset);
361 } else {
362 ret = reset_control_assert(qproc->mss_restart);
363 }
364
365 return ret;
Sibi Sankar9f135fa2018-05-21 22:57:12 +0530366}
367
368static int q6v5_reset_deassert(struct q6v5 *qproc)
369{
Sibi Sankar231f67d2018-05-21 22:57:13 +0530370 int ret;
371
372 if (qproc->has_alt_reset) {
Sibi Sankar29a5f9a2018-08-30 00:42:15 +0530373 reset_control_assert(qproc->pdc_reset);
Sibi Sankar231f67d2018-05-21 22:57:13 +0530374 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
375 ret = reset_control_reset(qproc->mss_restart);
376 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
Sibi Sankar29a5f9a2018-08-30 00:42:15 +0530377 reset_control_deassert(qproc->pdc_reset);
Sibi Sankar231f67d2018-05-21 22:57:13 +0530378 } else {
379 ret = reset_control_deassert(qproc->mss_restart);
380 }
381
382 return ret;
Sibi Sankar9f135fa2018-05-21 22:57:12 +0530383}
384
Bjorn Andersson051fb702016-06-20 14:28:41 -0700385static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
386{
387 unsigned long timeout;
388 s32 val;
389
390 timeout = jiffies + msecs_to_jiffies(ms);
391 for (;;) {
392 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
393 if (val)
394 break;
395
396 if (time_after(jiffies, timeout))
397 return -ETIMEDOUT;
398
399 msleep(1);
400 }
401
402 return val;
403}
404
405static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
406{
407
408 unsigned long timeout;
409 s32 val;
410
411 timeout = jiffies + msecs_to_jiffies(ms);
412 for (;;) {
413 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
414 if (val < 0)
415 break;
416
417 if (!status && val)
418 break;
419 else if (status && val == status)
420 break;
421
422 if (time_after(jiffies, timeout))
423 return -ETIMEDOUT;
424
425 msleep(1);
426 }
427
428 return val;
429}
430
/*
 * Power up and release the Hexagon core, then wait for the PBL to report
 * a status.  The sequence is version specific:
 *  - MSS_SDM845: enable the QDSP6SS sleep clock and kick the boot FSM.
 *  - MSS_MSM8996: manual QDSP6v56 power-up (BHS, LDO bypass, memories).
 *  - others (8916/8974): legacy QDSP6 power-up via QDSP6SS_PWR_CTL.
 *
 * The register write order below is a hardware-mandated sequence; do not
 * reorder statements.  Returns 0 on success or a negative errno.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		/* CLKOFF (bit 31) clears once the clock is actually running */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & BIT(31)), 1,
					 SLEEP_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* FSM completion is signalled in RMB_MBA_MSS_STATUS bit 0 */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
587
/*
 * Halt one AXI bus port through the halt registers at @offset in
 * @halt_map.  Requests the halt, waits up to HALT_ACK_TIMEOUT_MS for the
 * acknowledgment, verifies the idle status and clears the request bit.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		/* Stop polling on read error, ack received, or timeout */
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	/* The port must report idle after the ack; log (only) on failure */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
621
/*
 * Hand the modem firmware metadata (.mdt contents in @fw) to the MBA for
 * authentication via the RMB registers.
 *
 * The metadata is staged in a DMA-contiguous bounce buffer; on protected
 * platforms that buffer is temporarily assigned to the modem VM for the
 * duration of the authentication and reclaimed afterwards.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Point the MBA at the metadata and ask it to authenticate */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	/* Positive values are RMB status codes, not errors */
	return ret < 0 ? ret : 0;
}
671
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800672static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
673{
674 if (phdr->p_type != PT_LOAD)
675 return false;
676
677 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
678 return false;
679
680 if (!phdr->p_memsz)
681 return false;
682
683 return true;
684}
685
/*
 * Power up the subsystem and boot the MBA (modem boot authenticator).
 *
 * Enables proxy and active supplies/clocks, releases the MSS reset,
 * assigns the MBA region to the modem (on protected platforms), resets
 * the Hexagon core and waits for the MBA to report readiness.  The goto
 * chain unwinds the sequence in reverse order on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_irqs;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	/* Reset clocks must be running before the reset is released */
	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the PBL where the MBA image lives, then release the core */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* status 0 accepts any non-zero MBA status; validated below */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	/* MBA is resident now; remembered for the coredump path */
	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
799
/*
 * Undo q6v5_mba_load(): halt the AXI ports, reclaim region ownership
 * from the modem and power the subsystem back down.
 *
 * NOTE(review): when qcom_q6v5_unprepare() returns non-zero the proxy
 * clocks/regulators are deliberately left enabled here — presumably they
 * are released later from the handover path in qcom_q6v5; confirm.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	/* Return the modem firmware region to HLOS */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
				      false, qproc->mpss_phys,
				      qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
850
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800851static int q6v5_mpss_load(struct q6v5 *qproc)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700852{
853 const struct elf32_phdr *phdrs;
854 const struct elf32_phdr *phdr;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800855 const struct firmware *seg_fw;
856 const struct firmware *fw;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700857 struct elf32_hdr *ehdr;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800858 phys_addr_t mpss_reloc;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700859 phys_addr_t boot_addr;
Stefan Agnerd7dc8992018-06-14 15:28:02 -0700860 phys_addr_t min_addr = PHYS_ADDR_MAX;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800861 phys_addr_t max_addr = 0;
862 bool relocate = false;
863 char seg_name[10];
Bjorn Andersson01625cc52017-02-15 14:00:41 -0800864 ssize_t offset;
Avaneesh Kumar Dwivedi94c90782017-10-24 21:22:25 +0530865 size_t size = 0;
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800866 void *ptr;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700867 int ret;
868 int i;
869
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800870 ret = request_firmware(&fw, "modem.mdt", qproc->dev);
871 if (ret < 0) {
872 dev_err(qproc->dev, "unable to load modem.mdt\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -0700873 return ret;
874 }
875
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800876 /* Initialize the RMB validator */
877 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
878
879 ret = q6v5_mpss_init_image(qproc, fw);
880 if (ret)
881 goto release_firmware;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700882
883 ehdr = (struct elf32_hdr *)fw->data;
884 phdrs = (struct elf32_phdr *)(ehdr + 1);
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800885
886 for (i = 0; i < ehdr->e_phnum; i++) {
Bjorn Andersson051fb702016-06-20 14:28:41 -0700887 phdr = &phdrs[i];
888
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800889 if (!q6v5_phdr_valid(phdr))
Bjorn Andersson051fb702016-06-20 14:28:41 -0700890 continue;
891
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800892 if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
893 relocate = true;
894
895 if (phdr->p_paddr < min_addr)
896 min_addr = phdr->p_paddr;
897
898 if (phdr->p_paddr + phdr->p_memsz > max_addr)
899 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
900 }
901
902 mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
Sibi Sankar3bf62eb2018-07-27 20:50:03 +0530903 qproc->mpss_reloc = mpss_reloc;
Avaneesh Kumar Dwivedi94c90782017-10-24 21:22:25 +0530904 /* Load firmware segments */
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800905 for (i = 0; i < ehdr->e_phnum; i++) {
906 phdr = &phdrs[i];
907
908 if (!q6v5_phdr_valid(phdr))
Bjorn Andersson051fb702016-06-20 14:28:41 -0700909 continue;
910
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800911 offset = phdr->p_paddr - mpss_reloc;
912 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
913 dev_err(qproc->dev, "segment outside memory range\n");
914 ret = -EINVAL;
915 goto release_firmware;
916 }
917
918 ptr = qproc->mpss_region + offset;
919
920 if (phdr->p_filesz) {
921 snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
922 ret = request_firmware(&seg_fw, seg_name, qproc->dev);
923 if (ret) {
924 dev_err(qproc->dev, "failed to load %s\n", seg_name);
925 goto release_firmware;
926 }
927
928 memcpy(ptr, seg_fw->data, seg_fw->size);
929
930 release_firmware(seg_fw);
931 }
932
933 if (phdr->p_memsz > phdr->p_filesz) {
934 memset(ptr + phdr->p_filesz, 0,
935 phdr->p_memsz - phdr->p_filesz);
936 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700937 size += phdr->p_memsz;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700938 }
939
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530940 /* Transfer ownership of modem ddr region to q6 */
941 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
942 qproc->mpss_phys, qproc->mpss_size);
Bjorn Andersson9f2a4342017-11-06 22:26:41 -0800943 if (ret) {
944 dev_err(qproc->dev,
945 "assigning Q6 access to mpss memory failed: %d\n", ret);
Christophe JAILLET1a5d5c52017-11-15 07:58:35 +0100946 ret = -EAGAIN;
947 goto release_firmware;
Bjorn Andersson9f2a4342017-11-06 22:26:41 -0800948 }
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530949
Avaneesh Kumar Dwivedi94c90782017-10-24 21:22:25 +0530950 boot_addr = relocate ? qproc->mpss_phys : min_addr;
951 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
952 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
953 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
954
Bjorn Andersson72beb492016-07-12 17:15:45 -0700955 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
956 if (ret == -ETIMEDOUT)
957 dev_err(qproc->dev, "MPSS authentication timed out\n");
958 else if (ret < 0)
959 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
960
Bjorn Andersson051fb702016-06-20 14:28:41 -0700961release_firmware:
962 release_firmware(fw);
963
964 return ret < 0 ? ret : 0;
965}
966
/*
 * Coredump callback: copy one previously registered mpss segment into the
 * coredump buffer.  The MBA must be running for the mpss memory to be
 * accessible, so it is loaded before the first segment is copied and
 * reclaimed again once every registered segment has been dumped.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	/* segment->priv carries the segment index assigned at register time */
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded)
		ret = q6v5_mba_load(qproc);

	/* Fill with 0xff if the segment could not be made readable */
	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	/* Track which segments have been dumped so far */
	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded)
			q6v5_mba_reclaim(qproc);
	}
}
993
/*
 * rproc .start handler: boot the MBA (modem boot authenticator), feed it
 * the mpss firmware and wait for the remote side to signal that it is up.
 *
 * Returns 0 on success or a negative errno; on failure the mpss memory
 * ownership is returned to HLOS and the MBA is shut down again.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	/* Wait up to 5s for the handshake signalling modem start */
	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* The MBA buffer is no longer needed by Q6; take ownership back */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	/* Return mpss memory to HLOS before tearing the MBA down */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);
	q6v5_mba_reclaim(qproc);

	return ret;
}
1038
1039static int q6v5_stop(struct rproc *rproc)
1040{
1041 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1042 int ret;
1043
1044 qproc->running = false;
1045
Bjorn Andersson7d674732018-06-04 13:30:38 -07001046 ret = qcom_q6v5_request_stop(&qproc->q6v5);
1047 if (ret == -ETIMEDOUT)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001048 dev_err(qproc->dev, "timed out on wait\n");
1049
Sibi Sankar03045302018-10-17 19:25:25 +05301050 q6v5_mba_reclaim(qproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001051
1052 return 0;
1053}
1054
1055static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
1056{
1057 struct q6v5 *qproc = rproc->priv;
1058 int offset;
1059
1060 offset = da - qproc->mpss_reloc;
1061 if (offset < 0 || offset + len > qproc->mpss_size)
1062 return NULL;
1063
1064 return qproc->mpss_region + offset;
1065}
1066
/*
 * rproc .parse_fw hook: register each valid program header of modem.mdt
 * as a custom coredump segment and build the completion bitmask used by
 * qcom_q6v5_dump_segment() to know when the last segment was copied.
 *
 * Note: @mba_fw is the image handed over by the remoteproc core and is
 * not used here; the modem ELF header is fetched separately.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Program headers follow the ELF header directly */
	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		/* The segment index travels through the priv pointer */
		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}
1107
/* Operations handed to the remoteproc core for this modem instance */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};
1115
/*
 * Handover callback registered with qcom_q6v5_init(): once the modem has
 * taken over its proxy resources, drop the proxy clock and regulator
 * votes held on its behalf during boot.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
}
1125
/*
 * Map the "qdsp6" and "rmb" register regions and resolve the syscon-based
 * halt registers described by the "qcom,halt-regs" property.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* qcom,halt-regs = <&syscon q6_offset modem_offset nc_offset> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	/* Offsets of the three halt register blocks within the syscon */
	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}
1160
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301161static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1162 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001163{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301164 int i;
1165
1166 if (!clk_names)
1167 return 0;
1168
1169 for (i = 0; clk_names[i]; i++) {
1170 clks[i] = devm_clk_get(dev, clk_names[i]);
1171 if (IS_ERR(clks[i])) {
1172 int rc = PTR_ERR(clks[i]);
1173
1174 if (rc != -EPROBE_DEFER)
1175 dev_err(dev, "Failed to get %s clock\n",
1176 clk_names[i]);
1177 return rc;
1178 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001179 }
1180
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301181 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001182}
1183
1184static int q6v5_init_reset(struct q6v5 *qproc)
1185{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001186 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
Sibi Sankar9e483ef2018-08-30 00:42:14 +05301187 "mss_restart");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001188 if (IS_ERR(qproc->mss_restart)) {
1189 dev_err(qproc->dev, "failed to acquire mss restart\n");
1190 return PTR_ERR(qproc->mss_restart);
1191 }
1192
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301193 if (qproc->has_alt_reset) {
1194 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1195 "pdc_reset");
1196 if (IS_ERR(qproc->pdc_reset)) {
1197 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1198 return PTR_ERR(qproc->pdc_reset);
1199 }
1200 }
1201
Bjorn Andersson051fb702016-06-20 14:28:41 -07001202 return 0;
1203}
1204
Bjorn Andersson051fb702016-06-20 14:28:41 -07001205static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1206{
1207 struct device_node *child;
1208 struct device_node *node;
1209 struct resource r;
1210 int ret;
1211
1212 child = of_get_child_by_name(qproc->dev->of_node, "mba");
1213 node = of_parse_phandle(child, "memory-region", 0);
1214 ret = of_address_to_resource(node, 0, &r);
1215 if (ret) {
1216 dev_err(qproc->dev, "unable to resolve mba region\n");
1217 return ret;
1218 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001219 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001220
1221 qproc->mba_phys = r.start;
1222 qproc->mba_size = resource_size(&r);
1223 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1224 if (!qproc->mba_region) {
1225 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1226 &r.start, qproc->mba_size);
1227 return -EBUSY;
1228 }
1229
1230 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1231 node = of_parse_phandle(child, "memory-region", 0);
1232 ret = of_address_to_resource(node, 0, &r);
1233 if (ret) {
1234 dev_err(qproc->dev, "unable to resolve mpss region\n");
1235 return ret;
1236 }
Tobias Jordan278d7442018-02-15 16:12:55 +01001237 of_node_put(node);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001238
1239 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1240 qproc->mpss_size = resource_size(&r);
1241 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1242 if (!qproc->mpss_region) {
1243 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1244 &r.start, qproc->mpss_size);
1245 return -EBUSY;
1246 }
1247
1248 return 0;
1249}
1250
1251static int q6v5_probe(struct platform_device *pdev)
1252{
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301253 const struct rproc_hexagon_res *desc;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001254 struct q6v5 *qproc;
1255 struct rproc *rproc;
1256 int ret;
1257
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301258 desc = of_device_get_match_data(&pdev->dev);
1259 if (!desc)
1260 return -EINVAL;
1261
Brian Norrisbbcda302018-10-08 19:08:05 -07001262 if (desc->need_mem_protection && !qcom_scm_is_available())
1263 return -EPROBE_DEFER;
1264
Bjorn Andersson051fb702016-06-20 14:28:41 -07001265 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301266 desc->hexagon_mba_image, sizeof(*qproc));
Bjorn Andersson051fb702016-06-20 14:28:41 -07001267 if (!rproc) {
1268 dev_err(&pdev->dev, "failed to allocate rproc\n");
1269 return -ENOMEM;
1270 }
1271
Bjorn Andersson051fb702016-06-20 14:28:41 -07001272 qproc = (struct q6v5 *)rproc->priv;
1273 qproc->dev = &pdev->dev;
1274 qproc->rproc = rproc;
1275 platform_set_drvdata(pdev, qproc);
1276
Bjorn Andersson051fb702016-06-20 14:28:41 -07001277 ret = q6v5_init_mem(qproc, pdev);
1278 if (ret)
1279 goto free_rproc;
1280
1281 ret = q6v5_alloc_memory_region(qproc);
1282 if (ret)
1283 goto free_rproc;
1284
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301285 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1286 desc->proxy_clk_names);
1287 if (ret < 0) {
1288 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001289 goto free_rproc;
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301290 }
1291 qproc->proxy_clk_count = ret;
1292
Sibi Sankar231f67d2018-05-21 22:57:13 +05301293 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1294 desc->reset_clk_names);
1295 if (ret < 0) {
1296 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1297 goto free_rproc;
1298 }
1299 qproc->reset_clk_count = ret;
1300
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301301 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1302 desc->active_clk_names);
1303 if (ret < 0) {
1304 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1305 goto free_rproc;
1306 }
1307 qproc->active_clk_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001308
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301309 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1310 desc->proxy_supply);
1311 if (ret < 0) {
1312 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
Bjorn Andersson051fb702016-06-20 14:28:41 -07001313 goto free_rproc;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301314 }
1315 qproc->proxy_reg_count = ret;
1316
1317 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1318 desc->active_supply);
1319 if (ret < 0) {
1320 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1321 goto free_rproc;
1322 }
1323 qproc->active_reg_count = ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001324
Sibi Sankar29a5f9a2018-08-30 00:42:15 +05301325 qproc->has_alt_reset = desc->has_alt_reset;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001326 ret = q6v5_init_reset(qproc);
1327 if (ret)
1328 goto free_rproc;
1329
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301330 qproc->version = desc->version;
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301331 qproc->need_mem_protection = desc->need_mem_protection;
Bjorn Andersson7d674732018-06-04 13:30:38 -07001332
1333 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1334 qcom_msa_handover);
1335 if (ret)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001336 goto free_rproc;
1337
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301338 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1339 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
Sibi Sankar47254962018-05-21 22:57:14 +05301340 qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001341 qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
Bjorn Andersson1e140df2017-07-24 22:56:43 -07001342 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
Bjorn Andersson1fb82ee2017-08-27 21:51:38 -07001343 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001344
Bjorn Andersson051fb702016-06-20 14:28:41 -07001345 ret = rproc_add(rproc);
1346 if (ret)
1347 goto free_rproc;
1348
1349 return 0;
1350
1351free_rproc:
Bjorn Andersson433c0e02016-10-02 17:46:38 -07001352 rproc_free(rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001353
1354 return ret;
1355}
1356
/*
 * Remove: unregister the rproc first so no new operations can start,
 * then tear down the subdevs added in probe and release the rproc.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
	rproc_free(qproc->rproc);

	return 0;
}
1371
/* Per-SoC resource description for the SDM845 modem subsystem */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	/* Clocks voted for on the modem's behalf until handover */
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	/* Clocks required around the reset sequence */
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	/* Clocks held for as long as the modem is running */
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.version = MSS_SDM845,
};
1395
/* Per-SoC resource description for the MSM8996 modem subsystem */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	/* Regulators voted for on the modem's behalf until handover */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	/* Clocks voted for on the modem's behalf until handover */
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	/* Clocks held for as long as the modem is running */
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.version = MSS_MSM8996,
};
1424
/* Per-SoC resource description for the MSM8916 modem subsystem */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	/* Regulators voted for on the modem's behalf until handover */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	/* Clocks voted for on the modem's behalf until handover */
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	/* Clocks held for as long as the modem is running */
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8916,
};
1456
/* Per-SoC resource description for the MSM8974 modem subsystem */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	/* Regulators voted for on the modem's behalf until handover */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	/* Regulators held for as long as the modem is running */
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	/* Clocks voted for on the modem's behalf until handover */
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	/* Clocks held for as long as the modem is running */
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8974,
};
1496
static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is the legacy compatible, kept for old DTs */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001506
/* Platform driver glue and module metadata */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");