blob: 8a3fa2bcc9f699da150222335c37e3e96eb31f26 [file] [log] [blame]
Bjorn Andersson051fb702016-06-20 14:28:41 -07001/*
2 * Qualcomm Peripheral Image Loader
3 *
4 * Copyright (C) 2016 Linaro Ltd.
5 * Copyright (C) 2014 Sony Mobile Communications AB
6 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/clk.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/interrupt.h>
22#include <linux/kernel.h>
23#include <linux/mfd/syscon.h>
24#include <linux/module.h>
25#include <linux/of_address.h>
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +053026#include <linux/of_device.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070027#include <linux/platform_device.h>
28#include <linux/regmap.h>
29#include <linux/regulator/consumer.h>
30#include <linux/remoteproc.h>
31#include <linux/reset.h>
Bjorn Andersson2aad40d2017-01-27 03:12:57 -080032#include <linux/soc/qcom/mdt_loader.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070033#include <linux/soc/qcom/smem.h>
34#include <linux/soc/qcom/smem_state.h>
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053035#include <linux/iopoll.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070036
37#include "remoteproc_internal.h"
Bjorn Anderssonbde440e2017-01-27 02:28:32 -080038#include "qcom_common.h"
Bjorn Andersson051fb702016-06-20 14:28:41 -070039
40#include <linux/qcom_scm.h>
41
Bjorn Andersson051fb702016-06-20 14:28:41 -070042#define MPSS_CRASH_REASON_SMEM 421
43
44/* RMB Status Register Values */
45#define RMB_PBL_SUCCESS 0x1
46
47#define RMB_MBA_XPU_UNLOCKED 0x1
48#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
49#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
50#define RMB_MBA_AUTH_COMPLETE 0x4
51
52/* PBL/MBA interface registers */
53#define RMB_MBA_IMAGE_REG 0x00
54#define RMB_PBL_STATUS_REG 0x04
55#define RMB_MBA_COMMAND_REG 0x08
56#define RMB_MBA_STATUS_REG 0x0C
57#define RMB_PMI_META_DATA_REG 0x10
58#define RMB_PMI_CODE_START_REG 0x14
59#define RMB_PMI_CODE_LENGTH_REG 0x18
60
61#define RMB_CMD_META_DATA_READY 0x1
62#define RMB_CMD_LOAD_READY 0x2
63
64/* QDSP6SS Register Offsets */
65#define QDSP6SS_RESET_REG 0x014
66#define QDSP6SS_GFMUX_CTL_REG 0x020
67#define QDSP6SS_PWR_CTL_REG 0x030
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053068#define QDSP6SS_MEM_PWR_CTL 0x0B0
69#define QDSP6SS_STRAP_ACC 0x110
Bjorn Andersson051fb702016-06-20 14:28:41 -070070
71/* AXI Halt Register Offsets */
72#define AXI_HALTREQ_REG 0x0
73#define AXI_HALTACK_REG 0x4
74#define AXI_IDLE_REG 0x8
75
76#define HALT_ACK_TIMEOUT_MS 100
77
78/* QDSP6SS_RESET */
79#define Q6SS_STOP_CORE BIT(0)
80#define Q6SS_CORE_ARES BIT(1)
81#define Q6SS_BUS_ARES_ENABLE BIT(2)
82
83/* QDSP6SS_GFMUX_CTL */
84#define Q6SS_CLK_ENABLE BIT(1)
85
86/* QDSP6SS_PWR_CTL */
87#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
88#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
89#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
90#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
91#define Q6SS_ETB_SLP_NRET_N BIT(17)
92#define Q6SS_L2DATA_STBY_N BIT(18)
93#define Q6SS_SLP_RET_N BIT(19)
94#define Q6SS_CLAMP_IO BIT(20)
95#define QDSS_BHS_ON BIT(21)
96#define QDSS_LDO_BYP BIT(22)
97
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053098/* QDSP6v56 parameters */
99#define QDSP6v56_LDO_BYP BIT(25)
100#define QDSP6v56_BHS_ON BIT(24)
101#define QDSP6v56_CLAMP_WL BIT(21)
102#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
103#define HALT_CHECK_MAX_LOOPS 200
104#define QDSP6SS_XO_CBCR 0x0038
105#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
106
/* Runtime state for one regulator supply: handle plus requested operating point. */
struct reg_info {
	struct regulator *reg;
	int uV;		/* requested voltage; <= 0 means "do not configure" */
	int uA;		/* requested load; <= 0 means "do not configure" */
};
112
/*
 * Static per-SoC description of one regulator supply. Arrays of these are
 * terminated by an entry with a NULL @supply (see q6v5_regulator_init()).
 */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;		/* voltage to request while the supply is in use */
	int uA;		/* load to request while the supply is in use */
};
118
/* Per-SoC match data: MBA firmware name, supplies, clocks and feature flags. */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;	/* held only across boot */
	struct qcom_mss_reg_res *active_supply;	/* held while Q6 is running */
	char **proxy_clk_names;
	char **active_clk_names;
	int version;			/* one of the MSS_MSM89xx enum values */
	bool need_mem_protection;	/* hand carveouts to Q6 via SCM calls */
};
128
/* Driver state for one Hexagon modem subsystem (MSS) instance. */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register block */
	void __iomem *rmb_base;		/* PBL/MBA interface (RMB) registers */

	struct regmap *halt_map;	/* syscon exposing the AXI halt registers */
	u32 halt_q6;			/* per-port offsets into halt_map */
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;	/* smem state used to request a stop */
	unsigned stop_bit;

	struct clk *active_clks[8];	/* held while the Q6 is running */
	struct clk *proxy_clks[4];	/* held only across the boot sequence */
	int active_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	struct completion start_done;
	struct completion stop_done;
	bool running;			/* set once the modem firmware is up */

	/* Carveout holding the MBA boot image */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* Carveout holding the modem (MPSS) firmware segments */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;		/* load base used for da_to_va translation */
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	bool need_mem_protection;	/* see q6v5_xfer_mem_ownership() */
	int mpss_perm;			/* current SCM ownership bitmap of mpss region */
	int mba_perm;			/* current SCM ownership bitmap of mba region */
	int version;			/* one of the MSS_MSM89xx enum values */
};
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530176
/* Hexagon MSS generations with distinct reset/power sequences. */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
};
182
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530183static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
184 const struct qcom_mss_reg_res *reg_res)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700185{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530186 int rc;
187 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700188
Bjorn Andersson2bb5d902017-01-30 03:20:27 -0800189 if (!reg_res)
190 return 0;
191
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530192 for (i = 0; reg_res[i].supply; i++) {
193 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
194 if (IS_ERR(regs[i].reg)) {
195 rc = PTR_ERR(regs[i].reg);
196 if (rc != -EPROBE_DEFER)
197 dev_err(dev, "Failed to get %s\n regulator",
198 reg_res[i].supply);
199 return rc;
200 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700201
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530202 regs[i].uV = reg_res[i].uV;
203 regs[i].uA = reg_res[i].uA;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700204 }
205
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530206 return i;
207}
208
209static int q6v5_regulator_enable(struct q6v5 *qproc,
210 struct reg_info *regs, int count)
211{
212 int ret;
213 int i;
214
215 for (i = 0; i < count; i++) {
216 if (regs[i].uV > 0) {
217 ret = regulator_set_voltage(regs[i].reg,
218 regs[i].uV, INT_MAX);
219 if (ret) {
220 dev_err(qproc->dev,
221 "Failed to request voltage for %d.\n",
222 i);
223 goto err;
224 }
225 }
226
227 if (regs[i].uA > 0) {
228 ret = regulator_set_load(regs[i].reg,
229 regs[i].uA);
230 if (ret < 0) {
231 dev_err(qproc->dev,
232 "Failed to set regulator mode\n");
233 goto err;
234 }
235 }
236
237 ret = regulator_enable(regs[i].reg);
238 if (ret) {
239 dev_err(qproc->dev, "Regulator enable failed\n");
240 goto err;
241 }
242 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700243
244 return 0;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530245err:
246 for (; i >= 0; i--) {
247 if (regs[i].uV > 0)
248 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
249
250 if (regs[i].uA > 0)
251 regulator_set_load(regs[i].reg, 0);
252
253 regulator_disable(regs[i].reg);
254 }
255
256 return ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700257}
258
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530259static void q6v5_regulator_disable(struct q6v5 *qproc,
260 struct reg_info *regs, int count)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700261{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530262 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700263
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530264 for (i = 0; i < count; i++) {
265 if (regs[i].uV > 0)
266 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700267
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530268 if (regs[i].uA > 0)
269 regulator_set_load(regs[i].reg, 0);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700270
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530271 regulator_disable(regs[i].reg);
272 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700273}
274
/*
 * q6v5_clk_enable() - prepare and enable @count clocks
 *
 * On failure, the clocks already enabled are rolled back and the error
 * from clk_prepare_enable() is returned.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int enabled;
	int ret;

	for (enabled = 0; enabled < count; enabled++) {
		ret = clk_prepare_enable(clks[enabled]);
		if (ret) {
			dev_err(dev, "Clock enable failed\n");
			goto unwind;
		}
	}

	return 0;

unwind:
	while (enabled-- > 0)
		clk_disable_unprepare(clks[enabled]);

	return ret;
}
296
/* q6v5_clk_disable() - disable and unprepare @count clocks in array order */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int n;

	for (n = 0; n != count; n++)
		clk_disable_unprepare(clks[n]);
}
305
/*
 * The MBA firmware image carries no resource table, but the remoteproc
 * core requires one; hand back a static, empty v1 table. The table is
 * function-static so the returned pointer stays valid after return.
 */
static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
						  const struct firmware *fw,
						  int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);
	return &table;
}
315
/*
 * q6v5_xfer_mem_ownership() - move a memory range between HLOS and the modem
 * @current_perm: in/out bitmap tracking the range's current VM ownership
 * @remote_owner: true to assign the range to the modem (MSS MSA, RW);
 *		  false to return it to Linux (HLOS, RWX)
 * @addr/@size:	physical range; size is rounded up to a 4K multiple for
 *		the secure call
 *
 * No-op (returns 0) when the platform needs no memory protection or the
 * range already has the requested sole owner. Otherwise returns the
 * qcom_scm_assign_mem() result.
 */
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool remote_owner, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next;

	if (!qproc->need_mem_protection)
		return 0;
	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
		return 0;
	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
		return 0;

	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}
335
Bjorn Andersson051fb702016-06-20 14:28:41 -0700336static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
337{
338 struct q6v5 *qproc = rproc->priv;
339
340 memcpy(qproc->mba_region, fw->data, fw->size);
341
342 return 0;
343}
344
/* Firmware ops for the MBA image: custom loader plus a synthetic resource table. */
static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = q6v5_find_rsc_table,
	.load = q6v5_load,
};
349
350static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
351{
352 unsigned long timeout;
353 s32 val;
354
355 timeout = jiffies + msecs_to_jiffies(ms);
356 for (;;) {
357 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
358 if (val)
359 break;
360
361 if (time_after(jiffies, timeout))
362 return -ETIMEDOUT;
363
364 msleep(1);
365 }
366
367 return val;
368}
369
370static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
371{
372
373 unsigned long timeout;
374 s32 val;
375
376 timeout = jiffies + msecs_to_jiffies(ms);
377 for (;;) {
378 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
379 if (val < 0)
380 break;
381
382 if (!status && val)
383 break;
384 else if (status && val == status)
385 break;
386
387 if (time_after(jiffies, timeout))
388 return -ETIMEDOUT;
389
390 msleep(1);
391 }
392
393 return val;
394}
395
/*
 * q6v5proc_reset() - power up and release the Hexagon core, then wait for PBL
 *
 * Runs the version-specific QDSP6SS power-on sequence (MSM8996 uses the
 * QDSP6v56 register layout with per-bank memory power-up; older parts use
 * the simpler QDSS sequence), releases the core from reset and starts
 * execution, then polls for the primary boot loader (PBL) status.
 *
 * The exact register write ordering below follows the hardware power-up
 * protocol and must not be reordered.
 *
 * Return: 0 when PBL reports success, -ETIMEDOUT or -EINVAL otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;


	if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write landed before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write landed before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
524
/*
 * q6v5proc_halt_axi_port() - halt one AXI bus port before reset
 * @halt_map: syscon regmap containing the halt registers
 * @offset:   base offset of this port's HALTREQ/HALTACK/IDLE registers
 *
 * Requests a bus halt and waits up to HALT_ACK_TIMEOUT_MS for the ack,
 * then verifies the port reports idle. Errors are logged only; the
 * caller proceeds with the reset regardless.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
558
/*
 * q6v5_mpss_init_image() - hand the MPSS metadata (mdt) to the MBA for auth
 *
 * Copies the mdt into a physically contiguous DMA buffer, temporarily
 * assigns that buffer to the modem via SCM, points the RMB at it and waits
 * for the MBA to authenticate the headers. The buffer is always reclaimed
 * from the modem and freed before returning, even on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	/* MBA requires the metadata to be physically contiguous */
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and ask it to validate */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}
608
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800609static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
610{
611 if (phdr->p_type != PT_LOAD)
612 return false;
613
614 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
615 return false;
616
617 if (!phdr->p_memsz)
618 return false;
619
620 return true;
621}
622
/*
 * q6v5_mpss_load() - load and authenticate the modem (MPSS) firmware
 *
 * Reads "modem.mdt", lets the MBA authenticate its headers, then walks the
 * ELF program headers twice: first to discover the address span and whether
 * the image is relocatable, then to copy each "modem.bNN" segment into the
 * MPSS carveout (zero-filling any bss tail). Finally the carveout is
 * assigned to the Q6 via SCM and the RMB is asked to authenticate the
 * loaded image.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the image's address span and relocatability */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/* Relocatable images are rebased from min_addr to the carveout */
	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero-fill the bss portion beyond the file contents */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	/* Hand the RMB the image location/size and wait for authentication */
	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}
737
/*
 * q6v5_start() - rproc .start op: boot the MBA, then load and run the modem
 *
 * Sequence: enable proxy supplies/clocks (held only across boot), enable
 * active supplies, release the MSS reset, enable active clocks, hand the
 * MBA carveout to the Q6, run the core, wait for the MBA, load the MPSS
 * image and wait for the modem's ready signal (start_done). On success
 * the MBA carveout is reclaimed and the proxy resources are dropped.
 *
 * The goto labels unwind the sequence in reverse order from the point of
 * failure; their order must match the acquisition order above.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		return ret;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}
	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n",
			xfermemop_ret);
		goto disable_active_clks;
	}

	/* Point the boot ROM at the MBA image and power up the core */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* status == 0: accept any nonzero MBA status */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	/* Wait for the modem's ready interrupt to complete start_done */
	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto reclaim_mpss;
	}

	/* Modem is up; take the MBA carveout back from the Q6 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");
	qproc->running = true;

	/* Proxy resources were only needed across the boot handshake */
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);

assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return ret;
}
872
/*
 * q6v5_stop() - rproc .stop op: request shutdown, then force the core down
 *
 * Signals the modem to stop via the smem state bit and waits (best effort,
 * 5s) for its acknowledgment, then halts the AXI ports, reclaims the MPSS
 * carveout from the Q6, asserts reset and drops the active clocks and
 * supplies. Always returns 0 — a missed ack is logged but shutdown
 * proceeds regardless.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;
	u32 val;

	qproc->running = false;

	/* Raise the stop request bit toward the modem */
	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}


	/* Return the MPSS carveout to Linux ownership */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
				      qproc->mpss_phys, qproc->mpss_size);
	WARN_ON(ret);

	reset_control_assert(qproc->mss_restart);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}
917
918static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
919{
920 struct q6v5 *qproc = rproc->priv;
921 int offset;
922
923 offset = da - qproc->mpss_reloc;
924 if (offset < 0 || offset + len > qproc->mpss_size)
925 return NULL;
926
927 return qproc->mpss_region + offset;
928}
929
/* Operations invoked by the remoteproc core to control the Hexagon DSP */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};
935
936static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
937{
938 struct q6v5 *qproc = dev;
939 size_t len;
940 char *msg;
941
942 /* Sometimes the stop triggers a watchdog rather than a stop-ack */
943 if (!qproc->running) {
944 complete(&qproc->stop_done);
945 return IRQ_HANDLED;
946 }
947
948 msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
949 if (!IS_ERR(msg) && len > 0 && msg[0])
950 dev_err(qproc->dev, "watchdog received: %s\n", msg);
951 else
952 dev_err(qproc->dev, "watchdog without message\n");
953
954 rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
955
956 if (!IS_ERR(msg))
957 msg[0] = '\0';
958
959 return IRQ_HANDLED;
960}
961
962static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
963{
964 struct q6v5 *qproc = dev;
965 size_t len;
966 char *msg;
967
968 msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
969 if (!IS_ERR(msg) && len > 0 && msg[0])
970 dev_err(qproc->dev, "fatal error received: %s\n", msg);
971 else
972 dev_err(qproc->dev, "fatal error without message\n");
973
974 rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
975
976 if (!IS_ERR(msg))
977 msg[0] = '\0';
978
979 return IRQ_HANDLED;
980}
981
/*
 * "handover" IRQ: wakes the waiter blocked on start_done (the boot path;
 * see the start_done usage elsewhere in this file).
 */
static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);
	return IRQ_HANDLED;
}
989
/*
 * "stop-ack" IRQ: the remote acknowledged the stop request raised via
 * the smem "stop" state bit; wakes the waiter in q6v5_stop().
 */
static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);
	return IRQ_HANDLED;
}
997
998static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
999{
1000 struct of_phandle_args args;
1001 struct resource *res;
1002 int ret;
1003
1004 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1005 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001006 if (IS_ERR(qproc->reg_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001007 return PTR_ERR(qproc->reg_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001008
1009 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1010 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001011 if (IS_ERR(qproc->rmb_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001012 return PTR_ERR(qproc->rmb_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001013
1014 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1015 "qcom,halt-regs", 3, 0, &args);
1016 if (ret < 0) {
1017 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1018 return -EINVAL;
1019 }
1020
1021 qproc->halt_map = syscon_node_to_regmap(args.np);
1022 of_node_put(args.np);
1023 if (IS_ERR(qproc->halt_map))
1024 return PTR_ERR(qproc->halt_map);
1025
1026 qproc->halt_q6 = args.args[0];
1027 qproc->halt_modem = args.args[1];
1028 qproc->halt_nc = args.args[2];
1029
1030 return 0;
1031}
1032
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301033static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1034 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001035{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301036 int i;
1037
1038 if (!clk_names)
1039 return 0;
1040
1041 for (i = 0; clk_names[i]; i++) {
1042 clks[i] = devm_clk_get(dev, clk_names[i]);
1043 if (IS_ERR(clks[i])) {
1044 int rc = PTR_ERR(clks[i]);
1045
1046 if (rc != -EPROBE_DEFER)
1047 dev_err(dev, "Failed to get %s clock\n",
1048 clk_names[i]);
1049 return rc;
1050 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001051 }
1052
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301053 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001054}
1055
1056static int q6v5_init_reset(struct q6v5 *qproc)
1057{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001058 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1059 NULL);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001060 if (IS_ERR(qproc->mss_restart)) {
1061 dev_err(qproc->dev, "failed to acquire mss restart\n");
1062 return PTR_ERR(qproc->mss_restart);
1063 }
1064
1065 return 0;
1066}
1067
1068static int q6v5_request_irq(struct q6v5 *qproc,
1069 struct platform_device *pdev,
1070 const char *name,
1071 irq_handler_t thread_fn)
1072{
1073 int ret;
1074
1075 ret = platform_get_irq_byname(pdev, name);
1076 if (ret < 0) {
1077 dev_err(&pdev->dev, "no %s IRQ defined\n", name);
1078 return ret;
1079 }
1080
1081 ret = devm_request_threaded_irq(&pdev->dev, ret,
1082 NULL, thread_fn,
1083 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
1084 "q6v5", qproc);
1085 if (ret)
1086 dev_err(&pdev->dev, "request %s IRQ failed\n", name);
1087
1088 return ret;
1089}
1090
1091static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1092{
1093 struct device_node *child;
1094 struct device_node *node;
1095 struct resource r;
1096 int ret;
1097
1098 child = of_get_child_by_name(qproc->dev->of_node, "mba");
1099 node = of_parse_phandle(child, "memory-region", 0);
1100 ret = of_address_to_resource(node, 0, &r);
1101 if (ret) {
1102 dev_err(qproc->dev, "unable to resolve mba region\n");
1103 return ret;
1104 }
1105
1106 qproc->mba_phys = r.start;
1107 qproc->mba_size = resource_size(&r);
1108 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1109 if (!qproc->mba_region) {
1110 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1111 &r.start, qproc->mba_size);
1112 return -EBUSY;
1113 }
1114
1115 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1116 node = of_parse_phandle(child, "memory-region", 0);
1117 ret = of_address_to_resource(node, 0, &r);
1118 if (ret) {
1119 dev_err(qproc->dev, "unable to resolve mpss region\n");
1120 return ret;
1121 }
1122
1123 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1124 qproc->mpss_size = resource_size(&r);
1125 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1126 if (!qproc->mpss_region) {
1127 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1128 &r.start, qproc->mpss_size);
1129 return -EBUSY;
1130 }
1131
1132 return 0;
1133}
1134
/*
 * Probe: bind a Hexagon modem instance to the remoteproc framework.
 *
 * Looks up the per-SoC resource description from the OF match data,
 * allocates the rproc with q6v5 as private data, then acquires in order:
 * register mappings, reserved-memory regions, proxy/active clocks,
 * proxy/active regulators, the MSS reset line, the four modem IRQs and
 * the "stop" smem state.  Finally registers the SMD and SSR subdevices
 * and adds the rproc.  Returns 0 or a negative errno; on any failure the
 * rproc is freed (everything else is devm-managed or freed with it).
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* qproc lives in rproc->priv; both are freed by rproc_free() */
	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->fw_ops = &q6v5_fw_ops;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	/* q6v5_init_clocks/q6v5_regulator_init return a count on success */
	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}
	/* Both regions start out owned by Linux (HLOS) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}
1246
/*
 * Remove: unregister the rproc first (stopping the remote if running),
 * then tear down the SMD/SSR subdevices and free the rproc (which also
 * frees the embedded q6v5 context).  Order mirrors the reverse of probe.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
	rproc_free(qproc->rproc);

	return 0;
}
1259
/*
 * MSM8996 modem resources: uses SCM-assisted memory protection and lists
 * no regulator supplies here (only clock votes are described for this
 * SoC in this table).
 */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	/* Clocks voted only while the proxy (boot) phase is active */
	.proxy_clk_names = (char*[]){
			"xo",
			"pnoc",
			NULL
	},
	/* Clocks held for as long as the subsystem is running */
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"mem",
			"gpll0_mss_clk",
			NULL
	},
	.need_mem_protection = true,
	.version = MSS_MSM8996,
};
1277
/*
 * MSM8916 modem resources: regulator votes (uV = voltage floor,
 * uA = load vote) plus clock lists; no SCM memory protection.
 */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	/* Supplies voted only during the proxy (boot) phase */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.version = MSS_MSM8916,
};
1308
/*
 * MSM8974 modem resources: like MSM8916 but with a different MBA image
 * name and an additional "mss" supply held while the subsystem runs;
 * no SCM memory protection.
 */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	/* Supplies voted only during the proxy (boot) phase */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	/* Supplies held for as long as the subsystem is running */
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.version = MSS_MSM8974,
};
1347
static const struct of_device_id q6v5_of_match[] = {
	/* Legacy compatible, kept for old DTs; treated as MSM8916 */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001356
/* Platform driver glue; matched via the OF table above */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");