/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/iopoll.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"

#include <linux/qcom_scm.h>

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define HALT_CHECK_MAX_LOOPS		200
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **active_clk_names;
	int version;
	bool need_mem_protection;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct clk *active_clks[8];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	struct completion start_done;
	struct completion stop_done;
	bool running;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	bool need_mem_protection;
	int mpss_perm;
	int mba_perm;
	int version;
};

enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
};

static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

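/*
 * Move ownership of a physical memory range between Linux (HLOS) and the
 * modem subsystem VM via an SCM call. This is a no-op when the platform
 * does not require memory protection or when the range already has the
 * requested owner.
 */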
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool remote_owner, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next;

	if (!qproc->need_mem_protection)
		return 0;
	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
		return 0;
	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
		return 0;

	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}

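/* rproc "load" handler: copy the MBA firmware image into its carveout */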
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

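/* Poll the RMB PBL status register until PBL reports a result or @ms expires */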
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

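/*
 * Poll the RMB MBA status register until it reports @status (or any
 * non-zero value when @status is 0), an error, or the @ms timeout expires.
 */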
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

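/*
 * Power up and reset the Hexagon core, then wait for the PBL to report a
 * successful boot of the MBA image. The power-on sequence differs between
 * MSM8996 (which uses the QDSP6v56 power control bits) and the older
 * QDSP6 subsystems.
 */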
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS requires the XO CBCR to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories one at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

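/* Request a halt of the given AXI port and wait for it to be acknowledged */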
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

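/*
 * Hand the MPSS metadata (the mdt header) to the MBA for authentication:
 * copy it into a DMA buffer, grant the modem access to that buffer, point
 * the RMB registers at it and wait for the authentication result.
 */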
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed, system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}

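/* Only loadable, non-hash segments with a non-zero memory size are loaded */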
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

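/*
 * Load the modem firmware: parse modem.mdt, copy each valid segment
 * (modem.bNN) into the MPSS carveout, hand ownership of the region to the
 * modem and ask the MBA to authenticate the loaded image.
 */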
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}

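/*
 * Boot sequence: enable the proxy and active resources, release the MSS
 * reset, give the modem access to the MBA carveout, run the MBA, load and
 * authenticate the MPSS image and finally wait for the handover before the
 * proxy votes are dropped.
 */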
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		return ret;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}
	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n",
			xfermemop_ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	qproc->running = true;

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);

assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return ret;
}

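/*
 * Shutdown sequence: signal the modem to stop via the smem state, wait for
 * the stop acknowledgement, halt the AXI ports, reclaim the MPSS region
 * and assert the MSS reset.
 */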
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;
	u32 val;

	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/* To avoid high MX current during LPASS/MSS restart. */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
				      qproc->mpss_phys, qproc->mpss_size);
	WARN_ON(ret);

	reset_control_assert(qproc->mss_restart);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}

static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.load = q6v5_load,
};

static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	/* Sometimes the stop triggers a watchdog rather than a stop-ack */
	if (!qproc->running) {
		complete(&qproc->stop_done);
		return IRQ_HANDLED;
	}

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "watchdog received: %s\n", msg);
	else
		dev_err(qproc->dev, "watchdog without message\n");

	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "fatal error received: %s\n", msg);
	else
		dev_err(qproc->dev, "fatal error without message\n");

	rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);
	return IRQ_HANDLED;
}

static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);
	return IRQ_HANDLED;
}

static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}

static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}

static int q6v5_request_irq(struct q6v5 *qproc,
			    struct platform_device *pdev,
			    const char *name,
			    irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"q6v5", qproc);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}

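/*
 * Resolve the "mba" and "mpss" memory-region phandles from the device tree
 * and map both carveouts for the driver to use.
 */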
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);
	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
	if (!qproc->mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mba_size);
		return -EBUSY;
	}

	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);
	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
	if (!qproc->mpss_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mpss_size);
		return -EBUSY;
	}

	return 0;
}

static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}

static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
	rproc_free(qproc->rproc);

	return 0;
}

static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"pnoc",
			NULL
	},
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"mem",
			"gpll0_mss_clk",
			NULL
	},
	.need_mem_protection = true,
	.version = MSS_MSM8996,
};

static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.version = MSS_MSM8916,
};

static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");