// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "qcom_common.h"
#include "remoteproc_internal.h"
#include "qcom_pil_info.h"
#include "qcom_wcnss.h"

#define WCNSS_CRASH_REASON_SMEM		422
#define WCNSS_FIRMWARE_NAME		"wcnss.mdt"
#define WCNSS_PAS_ID			6
#define WCNSS_SSCTL_ID			0x13

#define WCNSS_SPARE_NVBIN_DLND		BIT(25)

#define WCNSS_PMU_IRIS_XO_CFG		BIT(3)
#define WCNSS_PMU_IRIS_XO_EN		BIT(4)
#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP	BIT(5)
#define WCNSS_PMU_IRIS_XO_CFG_STS	BIT(6) /* 1: in progress, 0: done */

#define WCNSS_PMU_IRIS_RESET		BIT(7)
#define WCNSS_PMU_IRIS_RESET_STS	BIT(8) /* 1: in progress, 0: done */
#define WCNSS_PMU_IRIS_XO_READ		BIT(9)
#define WCNSS_PMU_IRIS_XO_READ_STS	BIT(10)

#define WCNSS_PMU_XO_MODE_MASK		GENMASK(2, 1)
#define WCNSS_PMU_XO_MODE_19p2		0
#define WCNSS_PMU_XO_MODE_48		3

#define WCNSS_MAX_PDS			2

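/*
 * Per-SoC match data: the PMU config and spare register offsets within the
 * mapped "pmu" region, plus the power domains and regulators each WCNSS
 * generation (Riva, Pronto v1/v2) needs before it can be booted.
 */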
struct wcnss_data {
	size_t pmu_offset;
	size_t spare_offset;

	const char *pd_names[WCNSS_MAX_PDS];
	const struct wcnss_vreg_info *vregs;
	size_t num_vregs, num_pd_vregs;
};

struct qcom_wcnss {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *pmu_cfg;
	void __iomem *spare_out;

	bool use_48mhz_xo;

	int wdog_irq;
	int fatal_irq;
	int ready_irq;
	int handover_irq;
	int stop_ack_irq;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct mutex iris_lock;
	struct qcom_iris *iris;

	struct device *pds[WCNSS_MAX_PDS];
	size_t num_pds;
	struct regulator_bulk_data *vregs;
	size_t num_vregs;

	struct completion start_done;
	struct completion stop_done;

	phys_addr_t mem_phys;
	phys_addr_t mem_reloc;
	void *mem_region;
	size_t mem_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_sysmon *sysmon;
};

static const struct wcnss_data riva_data = {
	.pmu_offset = 0x28,
	.spare_offset = 0xb4,

	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1050000, 1150000, 0 },
		{ "vddcx", 1050000, 1150000, 0 },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_vregs = 3,
};

static const struct wcnss_data pronto_v1_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 950000, 1150000, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

static const struct wcnss_data pronto_v2_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1287500, 1287500, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

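/*
 * Load the MDT-formatted firmware into the reserved memory region using the
 * WCNSS PAS id and record where it was placed in the PIL info region, which
 * aids post-mortem debugging. Authentication and release from reset happen
 * later, in wcnss_start().
 */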
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
			    wcnss->mem_region, wcnss->mem_phys,
			    wcnss->mem_size, &wcnss->mem_reloc);
	if (ret)
		return ret;

	qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);

	return 0;
}

static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Indicate NV download capability */
	val = readl(wcnss->spare_out);
	val |= WCNSS_SPARE_NVBIN_DLND;
	writel(val, wcnss->spare_out);
}

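/*
 * Program the PMU config register: reset the attached IRIS RF module and
 * select its reference clock mode (19.2 MHz or 48 MHz XO), polling the
 * relevant status bits between steps.
 */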
static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Clear PMU cfg register */
	writel(0, wcnss->pmu_cfg);

	val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
	writel(val, wcnss->pmu_cfg);

	/* Clear XO_MODE */
	val &= ~WCNSS_PMU_XO_MODE_MASK;
	if (wcnss->use_48mhz_xo)
		val |= WCNSS_PMU_XO_MODE_48 << 1;
	else
		val |= WCNSS_PMU_XO_MODE_19p2 << 1;
	writel(val, wcnss->pmu_cfg);

	/* Reset IRIS */
	val |= WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Wait for PMU.iris_reg_reset_sts */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
		cpu_relax();

	/* Clear IRIS reset */
	val &= ~WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Start IRIS XO configuration */
	val |= WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Wait for XO configuration to finish */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
		cpu_relax();

	/* Stop IRIS XO configuration */
	val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
	val &= ~WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Add some delay for XO to settle */
	msleep(20);
}

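/*
 * Boot sequence: vote the attached power domains to their highest
 * performance state, enable the regulators and the IRIS module, program the
 * PMU and IRIS configuration, then have TrustZone authenticate the firmware
 * and release WCNSS from reset. We wait up to five seconds for the "ready"
 * interrupt; the votes taken here are dropped again on the way out, see the
 * comment in wcnss_handover_interrupt().
 */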
static int wcnss_start(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret, i;

	mutex_lock(&wcnss->iris_lock);
	if (!wcnss->iris) {
		dev_err(wcnss->dev, "no iris registered\n");
		ret = -EINVAL;
		goto release_iris_lock;
	}

	for (i = 0; i < wcnss->num_pds; i++) {
		dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
		ret = pm_runtime_get_sync(wcnss->pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(wcnss->pds[i]);
			goto disable_pds;
		}
	}

	ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
	if (ret)
		goto disable_pds;

	ret = qcom_iris_enable(wcnss->iris);
	if (ret)
		goto disable_regulators;

	wcnss_indicate_nv_download(wcnss);
	wcnss_configure_iris(wcnss);

	ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
	if (ret) {
		dev_err(wcnss->dev,
			"failed to authenticate image and release reset\n");
		goto disable_iris;
	}

	ret = wait_for_completion_timeout(&wcnss->start_done,
					  msecs_to_jiffies(5000));
	if (wcnss->ready_irq > 0 && ret == 0) {
		/* We have a ready_irq, but it didn't fire in time. */
		dev_err(wcnss->dev, "start timed out\n");
		qcom_scm_pas_shutdown(WCNSS_PAS_ID);
		ret = -ETIMEDOUT;
		goto disable_iris;
	}

	ret = 0;

disable_iris:
	qcom_iris_disable(wcnss->iris);
disable_regulators:
	regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
disable_pds:
	for (i--; i >= 0; i--) {
		pm_runtime_put(wcnss->pds[i]);
		dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
	}
release_iris_lock:
	mutex_unlock(&wcnss->iris_lock);

	return ret;
}

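/*
 * Request a graceful shutdown by asserting the "stop" bit in the SMEM state
 * and waiting for the firmware to acknowledge, then shut the processor down
 * through TrustZone.
 */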
static int wcnss_stop(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	if (wcnss->state) {
		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    BIT(wcnss->stop_bit));

		ret = wait_for_completion_timeout(&wcnss->stop_done,
						  msecs_to_jiffies(5000));
		if (ret == 0)
			dev_err(wcnss->dev, "timed out on wait\n");

		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    0);
	}

	ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
	if (ret)
		dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);

	return ret;
}

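/*
 * Translate a device address used by the firmware into a kernel pointer
 * within the reserved carveout; returns NULL for addresses outside of it.
 */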
static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int offset;

	offset = da - wcnss->mem_reloc;
	if (offset < 0 || offset + len > wcnss->mem_size)
		return NULL;

	return wcnss->mem_region + offset;
}

static const struct rproc_ops wcnss_ops = {
	.start = wcnss_start,
	.stop = wcnss_stop,
	.da_to_va = wcnss_da_to_va,
	.parse_fw = qcom_register_dump_segments,
	.load = wcnss_load,
};

static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}

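/*
 * The firmware stores a crash reason string in SMEM item 422 before raising
 * the fatal interrupt; log it if present and let the remoteproc core handle
 * recovery.
 */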
static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(wcnss->dev, "fatal error received: %s\n", msg);

	rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->start_done);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
{
	/*
	 * XXX: At this point we're supposed to release the resources that we
	 * have been holding on behalf of the WCNSS. Unfortunately this
	 * interrupt comes way before the other side seems to be done.
	 *
	 * So we're currently relying on the ready interrupt firing later than
	 * this and we just disable the resources at the end of wcnss_start().
	 */

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->stop_done);

	return IRQ_HANDLED;
}

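/*
 * Attach the power domains named in the match data (e.g. "mx" and "cx" on
 * Pronto). Older device trees do not describe these domains; in that case
 * the probe path falls back to driving the corresponding regulators
 * directly.
 */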
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
			  const char * const pd_names[WCNSS_MAX_PDS])
{
	int i, ret;

	for (i = 0; i < WCNSS_MAX_PDS; i++) {
		if (!pd_names[i])
			break;

		wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
		if (IS_ERR_OR_NULL(wcnss->pds[i])) {
			ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
			for (i--; i >= 0; i--)
				dev_pm_domain_detach(wcnss->pds[i], false);
			return ret;
		}
	}
	wcnss->num_pds = i;

	return 0;
}

static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
	int i;

	for (i = 0; i < wcnss->num_pds; i++)
		dev_pm_domain_detach(wcnss->pds[i], false);
}

static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
				 const struct wcnss_vreg_info *info,
				 int num_vregs, int num_pd_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;
	int i;

	/*
	 * If attaching the power domains succeeded we can skip requesting
	 * the regulators for the power domains. For old device trees we need
	 * to reserve extra space to manage them through the regulator
	 * interface.
	 */
	if (wcnss->num_pds)
		info += num_pd_vregs;
	else
		num_vregs += num_pd_vregs;

	bulk = devm_kcalloc(wcnss->dev,
			    num_vregs, sizeof(struct regulator_bulk_data),
			    GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = info[i].name;

	ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
	if (ret)
		return ret;

	for (i = 0; i < num_vregs; i++) {
		if (info[i].max_voltage)
			regulator_set_voltage(bulk[i].consumer,
					      info[i].min_voltage,
					      info[i].max_voltage);

		if (info[i].load_uA)
			regulator_set_load(bulk[i].consumer, info[i].load_uA);
	}

	wcnss->vregs = bulk;
	wcnss->num_vregs = num_vregs;

	return 0;
}

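/*
 * Look up a named interrupt and install a threaded handler for it.
 * Interrupts marked optional may be missing from the device tree; that is
 * not treated as an error and the corresponding feature is simply unused.
 */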
static int wcnss_request_irq(struct qcom_wcnss *wcnss,
			     struct platform_device *pdev,
			     const char *name,
			     bool optional,
			     irq_handler_t thread_fn)
{
	int irq_number;
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0 && optional) {
		dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
		return 0;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	irq_number = ret;

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"wcnss", wcnss);
	if (ret) {
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);
		return ret;
	}

	/*
	 * Return the IRQ number rather than 0, so callers that store the
	 * result (e.g. wcnss->ready_irq) can later tell whether an optional
	 * interrupt actually exists.
	 */
	return irq_number;
}

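/*
 * Resolve the "memory-region" phandle and map the reserved carveout that the
 * WCNSS firmware is loaded into and executes from.
 */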
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(wcnss->dev, "no memory-region specified\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret)
		return ret;

	wcnss->mem_phys = wcnss->mem_reloc = r.start;
	wcnss->mem_size = resource_size(&r);
	wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
	if (!wcnss->mem_region) {
		dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, wcnss->mem_size);
		return -EBUSY;
	}

	return 0;
}

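/*
 * Probe order: check that SCM and PAS support for WCNSS are available, map
 * the "pmu" register block and the reserved memory region, attach power
 * domains (or fall back to regulators), request the named interrupts and the
 * "stop" SMEM state, register the SMD and sysmon subdevices, probe the IRIS
 * module and finally register the remoteproc.
 */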
static int wcnss_probe(struct platform_device *pdev)
{
	const char *fw_name = WCNSS_FIRMWARE_NAME;
	const struct wcnss_data *data;
	struct qcom_wcnss *wcnss;
	struct resource *res;
	struct rproc *rproc;
	void __iomem *mmio;
	int ret;

	data = of_device_get_match_data(&pdev->dev);

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
		dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
		return -ENXIO;
	}

	ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
				      &fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
			    fw_name, sizeof(*wcnss));
	if (!rproc) {
		dev_err(&pdev->dev, "unable to allocate remoteproc\n");
		return -ENOMEM;
	}
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	wcnss = (struct qcom_wcnss *)rproc->priv;
	wcnss->dev = &pdev->dev;
	wcnss->rproc = rproc;
	platform_set_drvdata(pdev, wcnss);

	init_completion(&wcnss->start_done);
	init_completion(&wcnss->stop_done);

	mutex_init(&wcnss->iris_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
	mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio)) {
		ret = PTR_ERR(mmio);
		goto free_rproc;
	}

	ret = wcnss_alloc_memory_region(wcnss);
	if (ret)
		goto free_rproc;

	wcnss->pmu_cfg = mmio + data->pmu_offset;
	wcnss->spare_out = mmio + data->spare_offset;

	/*
	 * We might need to fall back to regulators instead of power domains
	 * for old device trees. Don't report an error in that case.
	 */
	ret = wcnss_init_pds(wcnss, data->pd_names);
	if (ret && (ret != -ENODATA || !data->num_pd_vregs))
		goto free_rproc;

	ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
				    data->num_pd_vregs);
	if (ret)
		goto detach_pds;

	ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->wdog_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->fatal_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->ready_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->handover_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->stop_ack_irq = ret;

	if (wcnss->stop_ack_irq) {
		wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
							&wcnss->stop_bit);
		if (IS_ERR(wcnss->state)) {
			ret = PTR_ERR(wcnss->state);
			goto detach_pds;
		}
	}

	qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
	wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
	if (IS_ERR(wcnss->sysmon)) {
		ret = PTR_ERR(wcnss->sysmon);
		goto detach_pds;
	}

	wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
	if (IS_ERR(wcnss->iris)) {
		ret = PTR_ERR(wcnss->iris);
		goto detach_pds;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_iris;

	return 0;

remove_iris:
	qcom_iris_remove(wcnss->iris);
detach_pds:
	wcnss_release_pds(wcnss);
free_rproc:
	rproc_free(rproc);

	return ret;
}

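/* Tear down in roughly the reverse order of wcnss_probe(). */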
static int wcnss_remove(struct platform_device *pdev)
{
	struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);

	qcom_iris_remove(wcnss->iris);

	rproc_del(wcnss->rproc);

	qcom_remove_sysmon_subdev(wcnss->sysmon);
	qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
	wcnss_release_pds(wcnss);
	rproc_free(wcnss->rproc);

	return 0;
}

static const struct of_device_id wcnss_of_match[] = {
	{ .compatible = "qcom,riva-pil", &riva_data },
	{ .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
	{ .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
	{ },
};
MODULE_DEVICE_TABLE(of, wcnss_of_match);

static struct platform_driver wcnss_driver = {
	.probe = wcnss_probe,
	.remove = wcnss_remove,
	.driver = {
		.name = "qcom-wcnss-pil",
		.of_match_table = wcnss_of_match,
	},
};

module_platform_driver(wcnss_driver);

MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");