/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"
#include "qcom_q6v5.h"

#include <linux/qcom_scm.h>

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define HALT_CHECK_MAX_LOOPS		200
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define SLEEP_CHECK_MAX_LOOPS		200
#define BOOT_FSM_TIMEOUT		10000

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	int mpss_perm;
	int mba_perm;
	int version;
};

enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_SDM845,
};

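/*
 * Look up each supply named in @reg_res and record its voltage and load
 * requirements; returns the number of regulators found or a negative errno.
 */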
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

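/*
 * Hand a physically contiguous region over to the modem (MSS MSA VMID) or
 * back to HLOS via an SCM assign call, tracking the current owner so
 * redundant calls are skipped. No-op unless memory protection is required.
 */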
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool remote_owner, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next;

	if (!qproc->need_mem_protection)
		return 0;
	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
		return 0;
	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
		return 0;

	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}

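/* rproc load op: copy the MBA firmware image into the MBA carveout. */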
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

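/*
 * Assert/deassert the MSS restart reset. Parts with an alternate reset
 * (e.g. SDM845) use a pulsed reset_control_reset(), gated through the
 * RMB_MBA_ALT_RESET register on deassert.
 */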
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	if (qproc->has_alt_reset)
		return reset_control_reset(qproc->mss_restart);
	else
		return reset_control_assert(qproc->mss_restart);
}

static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

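/* Poll the PBL status register until it reports a result or @ms expires. */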
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

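/*
 * Poll the MBA status register until it matches @status (or becomes
 * non-zero when @status is 0), reports an error, or @ms expires.
 */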
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

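/*
 * Bring the Hexagon core out of reset and start it, using the power-up
 * sequence appropriate for the detected QDSP6 version, then wait for the
 * PBL to report its boot status.
 */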
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & BIT(31)), 1,
					 SLEEP_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

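/*
 * Request a halt of the given AXI port via the halt syscon and wait for
 * the acknowledgement; the port stays halted until the next reset.
 */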
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

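/*
 * Copy the MPSS metadata (mdt header plus hash segment) into a DMA buffer,
 * grant the modem access to it, and ask the MBA to authenticate it before
 * the firmware segments are loaded.
 */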
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}

static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

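/*
 * Load the MPSS firmware: authenticate the headers from modem.mdt, copy
 * each loadable modem.bNN segment into the mpss carveout, hand the region
 * to the modem and let the MBA authenticate the full image.
 */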
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}

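/*
 * rproc start op: power the subsystem, release it from reset, boot the
 * MBA, load and authenticate the MPSS image and wait for the modem to
 * signal that it is running; unwind everything on failure.
 */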
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_irqs;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");
	qproc->running = true;

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);

assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

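/*
 * rproc stop op: request a graceful shutdown through the stop signal,
 * halt the AXI ports, reclaim the mpss region and power the subsystem down.
 */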
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;
	u32 val;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
				      qproc->mpss_phys, qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}

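/* Translate a modem device address into a kernel pointer inside the mpss region. */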
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.load = q6v5_load,
};

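/*
 * Called once the modem has taken over its own resource votes; drop the
 * proxy clocks and regulators that were only needed to carry it through boot.
 */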
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
}

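/* Map the QDSP6 and RMB register blocks and parse the qcom,halt-regs syscon. */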
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}

static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}

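/*
 * Resolve the "mba" and "mpss" reserved-memory regions from the device tree
 * and map them for the driver's use.
 */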
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}
	of_node_put(node);

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);
	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
	if (!qproc->mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mba_size);
		return -EBUSY;
	}

	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}
	of_node_put(node);

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);
	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
	if (!qproc->mpss_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mpss_size);
		return -EBUSY;
	}

	return 0;
}

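/* Bind the device: map resources, look up clocks/regulators and register the rproc. */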
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	qproc->version = desc->version;
	qproc->has_alt_reset = desc->has_alt_reset;
	qproc->need_mem_protection = desc->need_mem_protection;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto free_rproc;

	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}

static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
	rproc_free(qproc->rproc);

	return 0;
}

static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"axis2",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.version = MSS_SDM845,
};

static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss_clk",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.version = MSS_MSM8996,
};

static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8916,
};

static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");