/*
 * Marvell PXA family clocks
 *
 * Copyright (C) 2014 Robert Jarzmik
 *
 * Common clock code for PXA clocks ("CKEN" type clocks + DT)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"

#define KHz 1000
#define MHz (1000 * 1000)

#define MDREFR_K0DB4	(1 << 29)	/* SDCLK0 Divide by 4 Control/Status */
#define MDREFR_K2FREE	(1 << 25)	/* SDRAM Free-Running Control */
#define MDREFR_K1FREE	(1 << 24)	/* SDRAM Free-Running Control */
#define MDREFR_K0FREE	(1 << 23)	/* SDRAM Free-Running Control */
#define MDREFR_SLFRSH	(1 << 22)	/* SDRAM Self-Refresh Control/Status */
#define MDREFR_APD	(1 << 20)	/* SDRAM/SSRAM Auto-Power-Down Enable */
#define MDREFR_K2DB2	(1 << 19)	/* SDCLK2 Divide by 2 Control/Status */
#define MDREFR_K2RUN	(1 << 18)	/* SDCLK2 Run Control/Status */
#define MDREFR_K1DB2	(1 << 17)	/* SDCLK1 Divide by 2 Control/Status */
#define MDREFR_K1RUN	(1 << 16)	/* SDCLK1 Run Control/Status */
#define MDREFR_E1PIN	(1 << 15)	/* SDCKE1 Level Control/Status */
#define MDREFR_K0DB2	(1 << 14)	/* SDCLK0 Divide by 2 Control/Status */
#define MDREFR_K0RUN	(1 << 13)	/* SDCLK0 Run Control/Status */
#define MDREFR_E0PIN	(1 << 12)	/* SDCKE0 Level Control/Status */
#define MDREFR_DB2_MASK	(MDREFR_K2DB2 | MDREFR_K1DB2)
#define MDREFR_DRI_MASK	0xFFF

static DEFINE_SPINLOCK(pxa_clk_lock);

static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
	.clks = pxa_clocks,
	.clk_num = CLK_MAX,
};

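/*
 * A PXA "CKEN" clock is modelled as a composite clock: a 2-input mux
 * and rate stage (hw), backed by two fixed-factor dividers for the
 * low-power (lp) and high-power (hp) operating points, plus a gate for
 * the CKEN enable bit. The optional is_in_low_power() hook, provided
 * by the SoC variant, reports which divider is currently in effect.
 */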
struct pxa_clk {
	struct clk_hw hw;
	struct clk_fixed_factor lp;
	struct clk_fixed_factor hp;
	struct clk_gate gate;
	bool (*is_in_low_power)(void);
};

#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)

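/*
 * Recompute the rate from the parent rate, using the low-power fixed
 * factor when no is_in_low_power() hook exists or when it reports
 * low-power mode, and the high-power factor otherwise.
 */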
static unsigned long cken_recalc_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);
	struct clk_fixed_factor *fix;

	if (!pclk->is_in_low_power || pclk->is_in_low_power())
		fix = &pclk->lp;
	else
		fix = &pclk->hp;
	__clk_hw_set_clk(&fix->hw, hw);
	return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
}

static struct clk_ops cken_rate_ops = {
	.recalc_rate = cken_recalc_rate,
};

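/*
 * Report parent 0 (the low-power parent) when no low-power hook exists
 * or when the SoC is currently in low-power mode, parent 1 otherwise.
 */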
static u8 cken_get_parent(struct clk_hw *hw)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);

	if (!pclk->is_in_low_power)
		return 0;
	return pclk->is_in_low_power() ? 0 : 1;
}

static struct clk_ops cken_mux_ops = {
	.get_parent = cken_get_parent,
	.set_parent = dummy_clk_set_parent,
};

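/*
 * Record the clock both in the onecell table used by the DT provider
 * (when ckid is a real index, i.e. not CLK_NONE) and in the clkdev
 * lookup table used by non-DT consumers.
 */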
void __init clkdev_pxa_register(int ckid, const char *con_id,
				const char *dev_id, struct clk *clk)
{
	if (!IS_ERR(clk) && (ckid != CLK_NONE))
		pxa_clocks[ckid] = clk;
	if (!IS_ERR(clk))
		clk_register_clkdev(clk, con_id, dev_id);
}

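/*
 * Register a table of CKEN clock descriptors: each entry becomes a
 * composite clock (mux + rate + gate, the gate sharing pxa_clk_lock)
 * and is exposed through clkdev and the DT onecell provider.
 *
 * Sketch of a typical caller (the descriptor table name "soc_clks" is
 * illustrative only; such tables are normally built per SoC with the
 * PXA_CKEN helpers from clk-pxa.h):
 *
 *	static struct desc_clk_cken soc_clks[] __initdata = { ... };
 *
 *	clk_pxa_cken_init(soc_clks, ARRAY_SIZE(soc_clks));
 */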
int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
{
	int i;
	struct pxa_clk *pxa_clk;
	struct clk *clk;

	for (i = 0; i < nb_clks; i++) {
		pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
		pxa_clk->is_in_low_power = clks[i].is_in_low_power;
		pxa_clk->lp = clks[i].lp;
		pxa_clk->hp = clks[i].hp;
		pxa_clk->gate = clks[i].gate;
		pxa_clk->gate.lock = &pxa_clk_lock;
		clk = clk_register_composite(NULL, clks[i].name,
					     clks[i].parent_names, 2,
					     &pxa_clk->hw, &cken_mux_ops,
					     &pxa_clk->hw, &cken_rate_ops,
					     &pxa_clk->gate.hw, &clk_gate_ops,
					     clks[i].flags);
		clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
				    clks[i].dev_id, clk);
	}
	return 0;
}

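/* Expose the pxa_clocks table as a DT clock provider for this node. */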
void __init clk_pxa_dt_common_init(struct device_node *np)
{
	of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}

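/*
 * Enter or leave turbo mode by rewriting CLKCFG (coprocessor 14,
 * register 6). The branch/".align 5" dance below places the mcr that
 * starts the frequency change, and the instructions around it, within
 * a single 32-byte cache line so the core does not need to fetch from
 * SDRAM while the clocks are switching.
 */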
void pxa2xx_core_turbo_switch(bool on)
{
	unsigned long flags;
	unsigned int unused, clkcfg;

	local_irq_save(flags);

	asm("mrc p14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
	clkcfg &= ~CLKCFG_TURBO & ~CLKCFG_HALFTURBO;
	if (on)
		clkcfg |= CLKCFG_TURBO;
	clkcfg |= CLKCFG_FCS;

	asm volatile(
	"	b	2f\n"
	"	.align	5\n"
	"1:	mcr	p14, 0, %1, c6, c0, 0\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
		: "=&r" (unused) : "r" (clkcfg));

	local_irq_restore(flags);
}

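/*
 * Change the core PLL configuration. The SDRAM refresh register MDREFR
 * must be updated in lockstep with the CLKCFG write: the refresh
 * interval (DRI) and divide-by-2 bits are preset or postset around the
 * frequency change depending on whether the memory clock slows down or
 * speeds up, as the inline comments below describe.
 */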
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
			u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
			void __iomem *cccr)
{
	unsigned int clkcfg = freq->clkcfg;
	unsigned int unused, preset_mdrefr, postset_mdrefr;
	unsigned long flags;

	local_irq_save(flags);

	/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
	 * we need to preset the smaller DRI before the change. If we're
	 * speeding up we need to set the larger DRI value after the change.
	 */
	preset_mdrefr = postset_mdrefr = readl(mdrefr);
	if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(freq->membus_khz)) {
		preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
		preset_mdrefr |= mdrefr_dri(freq->membus_khz);
	}
	postset_mdrefr =
		(postset_mdrefr & ~MDREFR_DRI_MASK) |
		mdrefr_dri(freq->membus_khz);

	/* If we're dividing the memory clock by two for the SDRAM clock, this
	 * must be set prior to the change. Clearing the divide must be done
	 * after the change.
	 */
	if (freq->div2) {
		preset_mdrefr |= MDREFR_DB2_MASK;
		postset_mdrefr |= MDREFR_DB2_MASK;
	} else {
		postset_mdrefr &= ~MDREFR_DB2_MASK;
	}

	/* Set the new CCCR and prepare CLKCFG */
	writel(freq->cccr, cccr);

	asm volatile(
	"	ldr	r4, [%1]\n"
	"	b	2f\n"
	"	.align	5\n"
	"1:	str	%3, [%1]		/* preset the MDREFR */\n"
	"	mcr	p14, 0, %2, c6, c0, 0	/* set CLKCFG[FCS] */\n"
	"	str	%4, [%1]		/* postset the MDREFR */\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
		: "=&r" (unused)
		: "r" (mdrefr), "r" (clkcfg), "r" (preset_mdrefr),
		  "r" (postset_mdrefr)
		: "r4", "r5");

	local_irq_restore(flags);
}

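/*
 * determine_rate helper: pick an entry from the SoC frequency table.
 * An exact match wins; otherwise the closest frequency below the
 * requested rate is preferred, then the closest one above. If the
 * min/max constraints rule out every entry, -EINVAL is returned.
 */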
int pxa2xx_determine_rate(struct clk_rate_request *req,
			  struct pxa2xx_freq *freqs, int nb_freqs)
{
	int i, closest_below = -1, closest_above = -1;
	unsigned long rate;

	for (i = 0; i < nb_freqs; i++) {
		rate = freqs[i].cpll;
		if (rate == req->rate)
			break;
		if (rate < req->min_rate)
			continue;
		if (rate > req->max_rate)
			continue;
		if (rate <= req->rate)
			closest_below = i;
		if ((rate >= req->rate) && (closest_above == -1))
			closest_above = i;
	}

	req->best_parent_hw = NULL;

	if (i < nb_freqs) {
		rate = req->rate;
	} else if (closest_below >= 0) {
		rate = freqs[closest_below].cpll;
	} else if (closest_above >= 0) {
		rate = freqs[closest_above].cpll;
	} else {
		pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
		return -EINVAL;
	}

	pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
	req->rate = rate;

	return 0;
}
Robert Jarzmik9fe69422016-11-02 22:33:06 +0100247}