blob: 3ea668d9fd4c505bda81d9508b27614e7c4e4bbc [file] [log] [blame]
Bjorn Andersson051fb702016-06-20 14:28:41 -07001/*
2 * Qualcomm Peripheral Image Loader
3 *
4 * Copyright (C) 2016 Linaro Ltd.
5 * Copyright (C) 2014 Sony Mobile Communications AB
6 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/clk.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/interrupt.h>
22#include <linux/kernel.h>
23#include <linux/mfd/syscon.h>
24#include <linux/module.h>
25#include <linux/of_address.h>
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +053026#include <linux/of_device.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070027#include <linux/platform_device.h>
28#include <linux/regmap.h>
29#include <linux/regulator/consumer.h>
30#include <linux/remoteproc.h>
31#include <linux/reset.h>
Bjorn Andersson2aad40d2017-01-27 03:12:57 -080032#include <linux/soc/qcom/mdt_loader.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070033#include <linux/soc/qcom/smem.h>
34#include <linux/soc/qcom/smem_state.h>
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053035#include <linux/iopoll.h>
Bjorn Andersson051fb702016-06-20 14:28:41 -070036
37#include "remoteproc_internal.h"
Bjorn Anderssonbde440e2017-01-27 02:28:32 -080038#include "qcom_common.h"
Bjorn Andersson051fb702016-06-20 14:28:41 -070039
40#include <linux/qcom_scm.h>
41
Bjorn Andersson051fb702016-06-20 14:28:41 -070042#define MPSS_CRASH_REASON_SMEM 421
43
44/* RMB Status Register Values */
45#define RMB_PBL_SUCCESS 0x1
46
47#define RMB_MBA_XPU_UNLOCKED 0x1
48#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
49#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
50#define RMB_MBA_AUTH_COMPLETE 0x4
51
52/* PBL/MBA interface registers */
53#define RMB_MBA_IMAGE_REG 0x00
54#define RMB_PBL_STATUS_REG 0x04
55#define RMB_MBA_COMMAND_REG 0x08
56#define RMB_MBA_STATUS_REG 0x0C
57#define RMB_PMI_META_DATA_REG 0x10
58#define RMB_PMI_CODE_START_REG 0x14
59#define RMB_PMI_CODE_LENGTH_REG 0x18
60
61#define RMB_CMD_META_DATA_READY 0x1
62#define RMB_CMD_LOAD_READY 0x2
63
64/* QDSP6SS Register Offsets */
65#define QDSP6SS_RESET_REG 0x014
66#define QDSP6SS_GFMUX_CTL_REG 0x020
67#define QDSP6SS_PWR_CTL_REG 0x030
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053068#define QDSP6SS_MEM_PWR_CTL 0x0B0
69#define QDSP6SS_STRAP_ACC 0x110
Bjorn Andersson051fb702016-06-20 14:28:41 -070070
71/* AXI Halt Register Offsets */
72#define AXI_HALTREQ_REG 0x0
73#define AXI_HALTACK_REG 0x4
74#define AXI_IDLE_REG 0x8
75
76#define HALT_ACK_TIMEOUT_MS 100
77
78/* QDSP6SS_RESET */
79#define Q6SS_STOP_CORE BIT(0)
80#define Q6SS_CORE_ARES BIT(1)
81#define Q6SS_BUS_ARES_ENABLE BIT(2)
82
83/* QDSP6SS_GFMUX_CTL */
84#define Q6SS_CLK_ENABLE BIT(1)
85
86/* QDSP6SS_PWR_CTL */
87#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
88#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
89#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
90#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
91#define Q6SS_ETB_SLP_NRET_N BIT(17)
92#define Q6SS_L2DATA_STBY_N BIT(18)
93#define Q6SS_SLP_RET_N BIT(19)
94#define Q6SS_CLAMP_IO BIT(20)
95#define QDSS_BHS_ON BIT(21)
96#define QDSS_LDO_BYP BIT(22)
97
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +053098/* QDSP6v56 parameters */
99#define QDSP6v56_LDO_BYP BIT(25)
100#define QDSP6v56_BHS_ON BIT(24)
101#define QDSP6v56_CLAMP_WL BIT(21)
102#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
103#define HALT_CHECK_MAX_LOOPS 200
104#define QDSP6SS_XO_CBCR 0x0038
105#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
106
/* Runtime state for one regulator supply: handle plus requested levels */
struct reg_info {
	struct regulator *reg;	/* regulator handle from devm_regulator_get() */
	int uV;			/* requested voltage in microvolts; <= 0 means "don't set" */
	int uA;			/* requested load in microamps; <= 0 means "don't set" */
};
112
/* Static per-SoC description of a regulator supply and its levels */
struct qcom_mss_reg_res {
	const char *supply;	/* supply name; NULL entry terminates the array */
	int uV;			/* voltage to request in microvolts, if > 0 */
	int uA;			/* load to request in microamps, if > 0 */
};
118
/* Per-SoC match data: firmware name, supplies, clocks and feature flags */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;		/* default MBA firmware name */
	struct qcom_mss_reg_res *proxy_supply;	/* supplies held only during boot */
	struct qcom_mss_reg_res *active_supply;	/* supplies held while running */
	char **proxy_clk_names;			/* clocks held only during boot */
	char **active_clk_names;		/* clocks held while running */
	int version;				/* MSS_MSM89xx enum value */
	bool need_mem_protection;		/* use SCM memory-ownership calls */
};
128
/* Driver instance state for one Q6V5 (Hexagon modem) remote processor */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;	/* QDSP6SS register block */
	void __iomem *rmb_base;	/* RMB (relay message buffer) registers */

	/* regmap + offsets used to halt the Q6/modem/NC AXI ports */
	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	/* SMEM state used to signal a stop request to the modem */
	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct clk *active_clks[8];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	struct completion start_done;
	struct completion stop_done;
	bool running;		/* true once the modem firmware is up */

	/* MBA (modem boot authenticator) carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* MPSS (modem subsystem firmware) carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;	/* load base when the image is relocatable */
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	bool need_mem_protection;
	int mpss_perm;		/* current SCM ownership bitmap of mpss region */
	int mba_perm;		/* current SCM ownership bitmap of mba region */
	int version;		/* MSS_MSM89xx enum value */
};
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +0530176
/* Supported MSS/Hexagon hardware generations, selected via match data */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
};
182
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530183static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
184 const struct qcom_mss_reg_res *reg_res)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700185{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530186 int rc;
187 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700188
Bjorn Andersson2bb5d902017-01-30 03:20:27 -0800189 if (!reg_res)
190 return 0;
191
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530192 for (i = 0; reg_res[i].supply; i++) {
193 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
194 if (IS_ERR(regs[i].reg)) {
195 rc = PTR_ERR(regs[i].reg);
196 if (rc != -EPROBE_DEFER)
197 dev_err(dev, "Failed to get %s\n regulator",
198 reg_res[i].supply);
199 return rc;
200 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700201
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530202 regs[i].uV = reg_res[i].uV;
203 regs[i].uA = reg_res[i].uA;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700204 }
205
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530206 return i;
207}
208
209static int q6v5_regulator_enable(struct q6v5 *qproc,
210 struct reg_info *regs, int count)
211{
212 int ret;
213 int i;
214
215 for (i = 0; i < count; i++) {
216 if (regs[i].uV > 0) {
217 ret = regulator_set_voltage(regs[i].reg,
218 regs[i].uV, INT_MAX);
219 if (ret) {
220 dev_err(qproc->dev,
221 "Failed to request voltage for %d.\n",
222 i);
223 goto err;
224 }
225 }
226
227 if (regs[i].uA > 0) {
228 ret = regulator_set_load(regs[i].reg,
229 regs[i].uA);
230 if (ret < 0) {
231 dev_err(qproc->dev,
232 "Failed to set regulator mode\n");
233 goto err;
234 }
235 }
236
237 ret = regulator_enable(regs[i].reg);
238 if (ret) {
239 dev_err(qproc->dev, "Regulator enable failed\n");
240 goto err;
241 }
242 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700243
244 return 0;
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530245err:
246 for (; i >= 0; i--) {
247 if (regs[i].uV > 0)
248 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
249
250 if (regs[i].uA > 0)
251 regulator_set_load(regs[i].reg, 0);
252
253 regulator_disable(regs[i].reg);
254 }
255
256 return ret;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700257}
258
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530259static void q6v5_regulator_disable(struct q6v5 *qproc,
260 struct reg_info *regs, int count)
Bjorn Andersson051fb702016-06-20 14:28:41 -0700261{
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530262 int i;
Bjorn Andersson051fb702016-06-20 14:28:41 -0700263
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530264 for (i = 0; i < count; i++) {
265 if (regs[i].uV > 0)
266 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700267
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530268 if (regs[i].uA > 0)
269 regulator_set_load(regs[i].reg, 0);
Bjorn Andersson051fb702016-06-20 14:28:41 -0700270
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +0530271 regulator_disable(regs[i].reg);
272 }
Bjorn Andersson051fb702016-06-20 14:28:41 -0700273}
274
/*
 * Prepare and enable @count clocks in order. On failure, every clock
 * enabled so far is disabled again and the errno is returned.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i = 0;

	while (i < count) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto unwind;
		}
		i++;
	}

	return 0;

unwind:
	/* i is the failing (not enabled) index; undo i-1 .. 0 */
	while (--i >= 0)
		clk_disable_unprepare(clks[i]);

	return rc;
}
296
/* Disable and unprepare @count clocks, front to back. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	struct clk **const end = clks + count;

	while (clks < end)
		clk_disable_unprepare(*clks++);
}
305
/*
 * rproc .find_rsc_table op: the modem firmware carries no resource table,
 * so hand the remoteproc core a static, empty v1 table to keep the common
 * loading path happy.
 */
static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
						  const struct firmware *fw,
						  int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);
	return &table;
}
315
/*
 * Transfer ownership of the physical region [addr, addr+size) between
 * HLOS and the modem (MSS MSA) virtual machine via a secure-monitor call.
 *
 * @current_perm: in/out — ownership bitmap tracked by the caller, updated
 *                by qcom_scm_assign_mem() on success
 * @remote_owner: true to give the region to the modem (RW), false to
 *                reclaim it for HLOS (RWX)
 *
 * Returns 0 on success (including the no-op cases) or a negative errno.
 */
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool remote_owner, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next;

	/* No-op on platforms without hypervisor memory protection */
	if (!qproc->need_mem_protection)
		return 0;
	/* Skip the SCM call when the region already has the wanted owner */
	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
		return 0;
	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
		return 0;

	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;

	/* SCM requires 4K-aligned sizes */
	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}
335
/*
 * rproc .load op: copy the MBA (modem boot authenticator) firmware image
 * into its dedicated DDR carveout.
 *
 * NOTE(review): assumes fw->size <= qproc->mba_size — no bounds check is
 * visible here; confirm the MBA image always fits the carveout.
 */
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}
344
Bjorn Andersson051fb702016-06-20 14:28:41 -0700345static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
346{
347 unsigned long timeout;
348 s32 val;
349
350 timeout = jiffies + msecs_to_jiffies(ms);
351 for (;;) {
352 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
353 if (val)
354 break;
355
356 if (time_after(jiffies, timeout))
357 return -ETIMEDOUT;
358
359 msleep(1);
360 }
361
362 return val;
363}
364
/*
 * Poll the MBA status register for up to @ms milliseconds.
 *
 * @status: exact status value to wait for, or 0 to accept any non-zero
 *          value
 *
 * Returns the value read (negative register values indicate an error
 * reported by the MBA itself), or -ETIMEDOUT on timeout.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{

	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		/* Negative values are error codes from the MBA */
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}
390
/*
 * Power up the Hexagon core and release it from reset so the PBL (primary
 * boot loader in ROM) starts executing the MBA image.
 *
 * The power-up sequence is hardware-generation specific: MSM8996 uses the
 * QDSP6v56 sequence (ACC override, XO CBCR gating, per-bank memory
 * power-up), older parts use the simpler QDSS/L2 sequence. The exact
 * order of register writes below follows the hardware bring-up procedure
 * and must not be reordered.
 *
 * Returns 0 when the PBL reports success, -ETIMEDOUT or -EINVAL otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;


	if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write posted before delaying */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write posted before delaying */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
519
/*
 * Halt one AXI bus port via the halt registers at @offset in @halt_map.
 *
 * Requests a halt, waits up to HALT_ACK_TIMEOUT_MS for the acknowledge,
 * verifies the port reports idle, then clears the request (the port
 * itself stays halted until the subsystem is reset). Errors are logged
 * but not propagated — this runs on shutdown/error paths.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
553
/*
 * Hand the MPSS image header (the .mdt file) to the MBA for
 * authentication.
 *
 * The header is copied into a physically contiguous DMA buffer, ownership
 * of that buffer is temporarily assigned to the modem VM, its address is
 * written to the RMB, and we wait for the MBA to report successful
 * metadata authentication. Ownership is always reclaimed and the buffer
 * freed before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		/* NOTE(review): original errno is replaced with -EAGAIN here */
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata is and kick off authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}
603
Bjorn Anderssone7fd2522017-01-26 13:58:35 -0800604static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
605{
606 if (phdr->p_type != PT_LOAD)
607 return false;
608
609 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
610 return false;
611
612 if (!phdr->p_memsz)
613 return false;
614
615 return true;
616}
617
/*
 * Load and authenticate the MPSS (modem) firmware.
 *
 * Steps:
 *  1. request "modem.mdt", reset the RMB length register and have the MBA
 *     authenticate the header (q6v5_mpss_init_image);
 *  2. scan the program headers to find the image bounds and whether the
 *     image is relocatable;
 *  3. copy each valid segment ("modem.bNN" files) into the MPSS carveout,
 *     zero-filling any bss tail;
 *  4. assign the carveout to the modem VM and tell the MBA to
 *     authenticate the loaded code, waiting up to 10 s for completion.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: compute image bounds and detect relocatable images */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/* Relocatable images are rebased so min_addr maps to mpss_phys */
	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero the bss portion (memsz beyond filesz) */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		/* NOTE(review): original errno is replaced with -EAGAIN here */
		ret = -EAGAIN;
		goto release_firmware;
	}

	/* Hand the image location/size to the MBA and start authentication */
	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}
732
/*
 * rproc .start op: power up the subsystem and boot the modem firmware.
 *
 * Sequence: enable proxy supplies/clocks (held only for boot), enable
 * active supplies, release the MSS reset, enable active clocks, hand the
 * MBA carveout to Q6, run the core-reset/boot sequence, wait for the MBA,
 * load and authenticate the MPSS image, then wait for the firmware's
 * start handshake. On success the MBA region is reclaimed and the proxy
 * resources dropped; on failure the goto chain unwinds in exact reverse
 * order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		return ret;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}
	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n",
			xfermemop_ret);
		goto disable_active_clks;
	}

	/* Tell the PBL where the MBA image lives, then boot the core */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* Accept either XPU-unlocked status as a successful MBA boot */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	/* Wait for the firmware's "ready" interrupt handshake */
	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto reclaim_mpss;
	}

	/* Modem is up; take the MBA carveout back for HLOS */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");
	qproc->running = true;

	/* Proxy resources are only needed during boot; release them now */
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);

assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return ret;
}
867
/*
 * rproc .stop op: request a graceful shutdown from the modem via SMEM
 * state bits, then force the subsystem down regardless of the outcome —
 * halt the AXI ports, apply the MSM8996 clamp workaround, reclaim the
 * MPSS carveout for HLOS, assert reset and drop active clocks/supplies.
 *
 * Always returns 0; a shutdown-handshake timeout is only logged.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;
	u32 val;

	qproc->running = false;

	/* Signal the stop request and wait for the firmware to acknowledge */
	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}


	/* Return the MPSS carveout to HLOS ownership */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
				      qproc->mpss_phys, qproc->mpss_size);
	WARN_ON(ret);

	reset_control_assert(qproc->mss_restart);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}
912
913static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
914{
915 struct q6v5 *qproc = rproc->priv;
916 int offset;
917
918 offset = da - qproc->mpss_reloc;
919 if (offset < 0 || offset + len > qproc->mpss_size)
920 return NULL;
921
922 return qproc->mpss_region + offset;
923}
924
/* Callbacks handed to the remoteproc core for this processor. */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.find_rsc_table = q6v5_find_rsc_table,
	.load = q6v5_load,
};
932
933static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
934{
935 struct q6v5 *qproc = dev;
936 size_t len;
937 char *msg;
938
939 /* Sometimes the stop triggers a watchdog rather than a stop-ack */
940 if (!qproc->running) {
941 complete(&qproc->stop_done);
942 return IRQ_HANDLED;
943 }
944
945 msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
946 if (!IS_ERR(msg) && len > 0 && msg[0])
947 dev_err(qproc->dev, "watchdog received: %s\n", msg);
948 else
949 dev_err(qproc->dev, "watchdog without message\n");
950
951 rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
952
953 if (!IS_ERR(msg))
954 msg[0] = '\0';
955
956 return IRQ_HANDLED;
957}
958
959static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
960{
961 struct q6v5 *qproc = dev;
962 size_t len;
963 char *msg;
964
965 msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
966 if (!IS_ERR(msg) && len > 0 && msg[0])
967 dev_err(qproc->dev, "fatal error received: %s\n", msg);
968 else
969 dev_err(qproc->dev, "fatal error without message\n");
970
971 rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
972
973 if (!IS_ERR(msg))
974 msg[0] = '\0';
975
976 return IRQ_HANDLED;
977}
978
979static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
980{
981 struct q6v5 *qproc = dev;
982
983 complete(&qproc->start_done);
984 return IRQ_HANDLED;
985}
986
987static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
988{
989 struct q6v5 *qproc = dev;
990
991 complete(&qproc->stop_done);
992 return IRQ_HANDLED;
993}
994
995static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
996{
997 struct of_phandle_args args;
998 struct resource *res;
999 int ret;
1000
1001 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1002 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001003 if (IS_ERR(qproc->reg_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001004 return PTR_ERR(qproc->reg_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001005
1006 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1007 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
Wei Yongjunb1653f22016-07-14 12:57:44 +00001008 if (IS_ERR(qproc->rmb_base))
Bjorn Andersson051fb702016-06-20 14:28:41 -07001009 return PTR_ERR(qproc->rmb_base);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001010
1011 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1012 "qcom,halt-regs", 3, 0, &args);
1013 if (ret < 0) {
1014 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1015 return -EINVAL;
1016 }
1017
1018 qproc->halt_map = syscon_node_to_regmap(args.np);
1019 of_node_put(args.np);
1020 if (IS_ERR(qproc->halt_map))
1021 return PTR_ERR(qproc->halt_map);
1022
1023 qproc->halt_q6 = args.args[0];
1024 qproc->halt_modem = args.args[1];
1025 qproc->halt_nc = args.args[2];
1026
1027 return 0;
1028}
1029
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301030static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1031 char **clk_names)
Bjorn Andersson051fb702016-06-20 14:28:41 -07001032{
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301033 int i;
1034
1035 if (!clk_names)
1036 return 0;
1037
1038 for (i = 0; clk_names[i]; i++) {
1039 clks[i] = devm_clk_get(dev, clk_names[i]);
1040 if (IS_ERR(clks[i])) {
1041 int rc = PTR_ERR(clks[i]);
1042
1043 if (rc != -EPROBE_DEFER)
1044 dev_err(dev, "Failed to get %s clock\n",
1045 clk_names[i]);
1046 return rc;
1047 }
Bjorn Andersson051fb702016-06-20 14:28:41 -07001048 }
1049
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301050 return i;
Bjorn Andersson051fb702016-06-20 14:28:41 -07001051}
1052
1053static int q6v5_init_reset(struct q6v5 *qproc)
1054{
Philipp Zabel5acbf7e2017-07-19 17:26:16 +02001055 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1056 NULL);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001057 if (IS_ERR(qproc->mss_restart)) {
1058 dev_err(qproc->dev, "failed to acquire mss restart\n");
1059 return PTR_ERR(qproc->mss_restart);
1060 }
1061
1062 return 0;
1063}
1064
1065static int q6v5_request_irq(struct q6v5 *qproc,
1066 struct platform_device *pdev,
1067 const char *name,
1068 irq_handler_t thread_fn)
1069{
1070 int ret;
1071
1072 ret = platform_get_irq_byname(pdev, name);
1073 if (ret < 0) {
1074 dev_err(&pdev->dev, "no %s IRQ defined\n", name);
1075 return ret;
1076 }
1077
1078 ret = devm_request_threaded_irq(&pdev->dev, ret,
1079 NULL, thread_fn,
1080 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
1081 "q6v5", qproc);
1082 if (ret)
1083 dev_err(&pdev->dev, "request %s IRQ failed\n", name);
1084
1085 return ret;
1086}
1087
1088static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1089{
1090 struct device_node *child;
1091 struct device_node *node;
1092 struct resource r;
1093 int ret;
1094
1095 child = of_get_child_by_name(qproc->dev->of_node, "mba");
1096 node = of_parse_phandle(child, "memory-region", 0);
1097 ret = of_address_to_resource(node, 0, &r);
1098 if (ret) {
1099 dev_err(qproc->dev, "unable to resolve mba region\n");
1100 return ret;
1101 }
1102
1103 qproc->mba_phys = r.start;
1104 qproc->mba_size = resource_size(&r);
1105 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1106 if (!qproc->mba_region) {
1107 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1108 &r.start, qproc->mba_size);
1109 return -EBUSY;
1110 }
1111
1112 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1113 node = of_parse_phandle(child, "memory-region", 0);
1114 ret = of_address_to_resource(node, 0, &r);
1115 if (ret) {
1116 dev_err(qproc->dev, "unable to resolve mpss region\n");
1117 return ret;
1118 }
1119
1120 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1121 qproc->mpss_size = resource_size(&r);
1122 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1123 if (!qproc->mpss_region) {
1124 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1125 &r.start, qproc->mpss_size);
1126 return -EBUSY;
1127 }
1128
1129 return 0;
1130}
1131
/*
 * Probe: allocate the rproc, gather all resources described by the
 * matched rproc_hexagon_res (register regions, reserved memory, clocks,
 * regulators, reset, IRQs, SMEM stop state) and register with the
 * remoteproc core. All device-managed resources are released
 * automatically on failure; only the rproc itself needs explicit
 * freeing via the shared free_rproc label.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	/* Per-SoC resource description from the of_match table. */
	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	/* qproc lives in rproc->priv and is freed together with it. */
	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	/* Clock helpers return the number of clocks acquired. */
	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	/* Regulator helpers likewise return the number of supplies. */
	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}
	/* Both firmware regions initially belong to HLOS. */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}
1241
1242static int q6v5_remove(struct platform_device *pdev)
1243{
1244 struct q6v5 *qproc = platform_get_drvdata(pdev);
1245
1246 rproc_del(qproc->rproc);
Bjorn Andersson4b489212017-01-29 14:05:50 -08001247
1248 qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
Bjorn Andersson1e140df2017-07-24 22:56:43 -07001249 qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
Bjorn Andersson433c0e02016-10-02 17:46:38 -07001250 rproc_free(qproc->rproc);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001251
1252 return 0;
1253}
1254
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301255static const struct rproc_hexagon_res msm8996_mss = {
1256 .hexagon_mba_image = "mba.mbn",
1257 .proxy_clk_names = (char*[]){
1258 "xo",
1259 "pnoc",
1260 NULL
1261 },
1262 .active_clk_names = (char*[]){
1263 "iface",
1264 "bus",
1265 "mem",
1266 "gpll0_mss_clk",
1267 NULL
1268 },
1269 .need_mem_protection = true,
1270 .version = MSS_MSM8996,
1271};
1272
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301273static const struct rproc_hexagon_res msm8916_mss = {
1274 .hexagon_mba_image = "mba.mbn",
Avaneesh Kumar Dwivedi19f902b2016-12-30 19:24:02 +05301275 .proxy_supply = (struct qcom_mss_reg_res[]) {
1276 {
1277 .supply = "mx",
1278 .uV = 1050000,
1279 },
1280 {
1281 .supply = "cx",
1282 .uA = 100000,
1283 },
1284 {
1285 .supply = "pll",
1286 .uA = 100000,
1287 },
1288 {}
1289 },
Avaneesh Kumar Dwivedi39b24102016-12-30 19:24:01 +05301290 .proxy_clk_names = (char*[]){
1291 "xo",
1292 NULL
1293 },
1294 .active_clk_names = (char*[]){
1295 "iface",
1296 "bus",
1297 "mem",
1298 NULL
1299 },
Avaneesh Kumar Dwivedi6c5a9dc2017-10-24 21:22:26 +05301300 .need_mem_protection = false,
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301301 .version = MSS_MSM8916,
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301302};
1303
/* MSM8974 MSS resources: like MSM8916 but with a split-file MBA image
 * and an additional "mss" supply held while the Hexagon is active.
 */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.version = MSS_MSM8974,
};
1342
Bjorn Andersson051fb702016-06-20 14:28:41 -07001343static const struct of_device_id q6v5_of_match[] = {
Avaneesh Kumar Dwivedi7a8ffe12016-12-30 19:24:00 +05301344 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1345 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1346 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
Avaneesh Kumar Dwivedi9f058fa2017-10-24 21:22:27 +05301347 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
Bjorn Andersson051fb702016-06-20 14:28:41 -07001348 { },
1349};
Javier Martinez Canillas3227c872016-10-18 18:24:19 -03001350MODULE_DEVICE_TABLE(of, q6v5_of_match);
Bjorn Andersson051fb702016-06-20 14:28:41 -07001351
/* Platform driver glue and module registration. */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");