// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
#include "common.h"

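/*
 * Register offsets and bit positions in the i.MX System Reset Controller
 * (SRC): SRC_SCR is the control register, SRC_GPR1 the base of the general
 * purpose registers holding the per-CPU jump address and argument, and the
 * BP_* values are bit positions within SCR.
 */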
#define SRC_SCR				0x000
#define SRC_GPR1			0x020
#define BP_SRC_SCR_WARM_RESET_ENABLE	0
#define BP_SRC_SCR_SW_GPU_RST		1
#define BP_SRC_SCR_SW_VPU_RST		2
#define BP_SRC_SCR_SW_IPU1_RST		3
#define BP_SRC_SCR_SW_OPEN_VG_RST	4
#define BP_SRC_SCR_SW_IPU2_RST		12
#define BP_SRC_SCR_CORE1_RST		14
#define BP_SRC_SCR_CORE1_ENABLE		22

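/*
 * src_base is the ioremapped SRC block; scr_lock serialises the
 * read-modify-write updates of SRC_SCR performed by the reset callback
 * and the CPU enable path below.
 */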
static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);

static const int sw_reset_bits[5] = {
	BP_SRC_SCR_SW_GPU_RST,
	BP_SRC_SCR_SW_VPU_RST,
	BP_SRC_SCR_SW_IPU1_RST,
	BP_SRC_SCR_SW_OPEN_VG_RST,
	BP_SRC_SCR_SW_IPU2_RST
};

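/*
 * Reset one of the modules listed in sw_reset_bits: set the module's
 * self-clearing software reset bit in SRC_SCR under scr_lock, then poll
 * for up to one second until the hardware clears it again. Returns
 * -EINVAL for an out-of-range index and -ETIME if the bit never clears.
 */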
static int imx_src_reset_module(struct reset_controller_dev *rcdev,
		unsigned long sw_reset_idx)
{
	unsigned long timeout;
	unsigned long flags;
	int bit;
	u32 val;

	if (sw_reset_idx >= ARRAY_SIZE(sw_reset_bits))
		return -EINVAL;

	bit = 1 << sw_reset_bits[sw_reset_idx];

	spin_lock_irqsave(&scr_lock, flags);
	val = readl_relaxed(src_base + SRC_SCR);
	val |= bit;
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock_irqrestore(&scr_lock, flags);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (readl(src_base + SRC_SCR) & bit) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

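/*
 * The SW_*_RST bits are exposed as a reset controller; the reset index a
 * consumer passes in (e.g. through a "resets" device tree phandle) is the
 * position in sw_reset_bits above, so 0 is the GPU reset, 2 the IPU1
 * reset, and so on.
 *
 * A consumer driver would then typically do something like (rough sketch,
 * not code from this file):
 *
 *	rstc = devm_reset_control_get_exclusive(dev, NULL);
 *	reset_control_reset(rstc);
 *
 * which ends up calling imx_src_reset_module() with that index.
 */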
static const struct reset_control_ops imx_src_ops = {
	.reset = imx_src_reset_module,
};

static struct reset_controller_dev imx_reset_controller = {
	.ops = &imx_src_ops,
	.nr_resets = ARRAY_SIZE(sw_reset_bits),
};

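/*
 * Enable or disable a secondary core: set or clear that core's ENABLE bit
 * in SRC_SCR and assert its software reset bit in the same write, so the
 * core starts (or stops) from a clean reset. The logical CPU number is
 * translated to the physical core number first.
 */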
void imx_enable_cpu(int cpu, bool enable)
{
	u32 mask, val;

	cpu = cpu_logical_map(cpu);
	mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val = enable ? val | mask : val & ~mask;
	val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}

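/*
 * Each CPU owns a pair of words in the SRC_GPR area: the word at
 * SRC_GPR1 + cpu * 8 holds the physical address the core starts
 * executing from once it is enabled, and the word at +4 is a scratch
 * argument handled by imx_get_cpu_arg()/imx_set_cpu_arg() below.
 */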
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(__pa_symbol(jump_addr),
		       src_base + SRC_GPR1 + cpu * 8);
}

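/* Read back or update the per-CPU scratch argument next to the jump address. */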
u32 imx_get_cpu_arg(int cpu)
{
	cpu = cpu_logical_map(cpu);
	return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
}

void imx_set_cpu_arg(int cpu, u32 arg)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
}

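/*
 * Map the SRC block from the "fsl,imx51-src" compatible node, register
 * the reset controller when CONFIG_RESET_CONTROLLER is enabled, and
 * disable the warm reset path so any reset request results in a full
 * cold reset.
 */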
void __init imx_src_init(void)
{
	struct device_node *np;
	u32 val;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx51-src");
	if (!np)
		return;
	src_base = of_iomap(np, 0);
	WARN_ON(!src_base);

	imx_reset_controller.of_node = np;
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		reset_controller_register(&imx_reset_controller);

	/*
	 * force warm reset sources to generate cold reset
	 * for a more reliable restart
	 */
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}