/*
 * Synopsys HSDK SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CGU_PLL_CTRL	0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS	0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS	0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON	0x00C /* ARC PLL monitor register */

#define CGU_PLL_CTRL_ODIV_SHIFT		2
#define CGU_PLL_CTRL_IDIV_SHIFT		4
#define CGU_PLL_CTRL_FBDIV_SHIFT	9
#define CGU_PLL_CTRL_BAND_SHIFT		20

#define CGU_PLL_CTRL_ODIV_MASK		GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK		GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK		GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

#define CGU_PLL_CTRL_PD			BIT(0)
#define CGU_PLL_CTRL_BYPASS		BIT(1)

#define CGU_PLL_STATUS_LOCK		BIT(0)
#define CGU_PLL_STATUS_ERR		BIT(1)

#define HSDK_PLL_MAX_LOCK_TIME		100 /* 100 us */

#define CGU_PLL_SOURCE_MAX		1

#define CORE_IF_CLK_THRESHOLD_HZ	500000000
#define CREG_CORE_IF_CLK_DIV_1		0x0
#define CREG_CORE_IF_CLK_DIV_2		0x1

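/*
 * One entry of a PLL configuration table: the target output @rate in Hz
 * plus the divider and band register fields that produce it. A non-zero
 * @bypass selects the PLL bypass path instead, making the output equal
 * to the input reference.
 */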
struct hsdk_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
	u32 band;
	u32 bypass;
};

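/*
 * Each table row satisfies the register encoding decoded in
 * hsdk_pll_recalc_rate() below:
 *
 *	rate = Fin * 2 * (fbdiv + 1) / ((idiv + 1) * 2^odiv)
 *
 * For example, assuming the ~33.33 MHz reference that feeds the
 * core/system PLLs on the HSDK board, the { 1000000000, 1, 29, 0, ... }
 * row gives 33.33 MHz * 60 / 2 = ~1000 MHz.
 */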
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000,  0, 11, 3, 0, 0 },
	{ 133000000,  0, 15, 3, 0, 0 },
	{ 200000000,  1, 47, 3, 0, 0 },
	{ 233000000,  1, 27, 2, 0, 0 },
	{ 300000000,  1, 35, 2, 0, 0 },
	{ 333000000,  1, 39, 2, 0, 0 },
	{ 400000000,  1, 47, 2, 0, 0 },
	{ 500000000,  0, 14, 1, 0, 0 },
	{ 600000000,  0, 17, 1, 0, 0 },
	{ 700000000,  0, 20, 1, 0, 0 },
	{ 800000000,  0, 23, 1, 0, 0 },
	{ 900000000,  1, 26, 0, 0, 0 },
	{ 1000000000, 1, 29, 0, 0, 0 },
	{ 1100000000, 1, 32, 0, 0, 0 },
	{ 1200000000, 1, 35, 0, 0, 0 },
	{ 1300000000, 1, 38, 0, 0, 0 },
	{ 1400000000, 1, 41, 0, 0, 0 },
	{ 1500000000, 1, 44, 0, 0, 0 },
	{ 1600000000, 1, 47, 0, 0, 0 },
	{}
};

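/*
 * HDMI PLL rates. These dividers are consistent with a 27 MHz HDMI
 * reference (e.g. 27 MHz * 44 / 4 = 297 MHz); on that assumption the
 * 27 MHz row simply engages the bypass path.
 */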
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 27000000,  0, 0,  0, 0, 1 },
	{ 297000000, 0, 21, 2, 0, 0 },
	{ 540000000, 0, 19, 1, 0, 0 },
	{ 594000000, 0, 21, 1, 0, 0 },
	{}
};

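/*
 * @regs points at the PLL register block itself; @spec_regs is the
 * extra CREG word that only the core PLL uses to switch the
 * core-interface clock divider (see hsdk_pll_core_update_rate()).
 */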
struct hsdk_pll_clk {
	struct clk_hw hw;
	void __iomem *regs;
	void __iomem *spec_regs;
	const struct hsdk_pll_devdata *pll_devdata;
	struct device *dev;
};

struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg;
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

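/*
 * Only the core PLL has an update_rate hook that also touches the CREG
 * interface-clock divider; the system and HDMI PLLs just reprogram the
 * PLL itself.
 */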
static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}

static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}

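/*
 * Build and write the control word. In bypass mode the current control
 * word is read back and only the BYPASS bit is OR-ed in, so the divider
 * fields stay untouched; otherwise a fresh word is composed with the PD
 * and BYPASS bits left cleared.
 */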
static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
				    const struct hsdk_pll_cfg *cfg)
{
	u32 val = 0;

	if (cfg->bypass) {
		val = hsdk_pll_read(clk, CGU_PLL_CTRL);
		val |= CGU_PLL_CTRL_BYPASS;
	} else {
		/* Powerdown and Bypass bits should be cleared */
		val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
		val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
		val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
		val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
	}

	dev_dbg(clk->dev, "write configuration: %#x\n", val);

	hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}

static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}

static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}

static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}

static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	u32 val;
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);

	val = hsdk_pll_read(clk, CGU_PLL_CTRL);

	dev_dbg(clk->dev, "current configuration: %#x\n", val);

	/* Check if PLL is bypassed */
	if (val & CGU_PLL_CTRL_BYPASS)
		return parent_rate;

	/* Check if PLL is disabled */
	if (val & CGU_PLL_CTRL_PD)
		return 0;

	/* input divider = reg.idiv + 1 */
	idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
	/* fb divider = 2*(reg.fbdiv + 1) */
	fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
	/* output divider = 2^(reg.odiv) */
	odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);

	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}

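/*
 * No dividers are derived at runtime: the request is simply snapped to
 * the closest rate present in the configuration table.
 */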
static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	int i;
	unsigned long best_rate;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);

	return best_rate;
}

static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock, then check the error status.
	 * If the CGU is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	return 0;
}

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	/*
	 * When the core clock exceeds 500 MHz, the divider for the
	 * interface clock must be programmed to div-by-2.
	 */
	if (rate > CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);

	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock, then check the error status.
	 * If the CGU is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	/*
	 * Program the divider back to div-by-1 if we successfully set the
	 * core clock below the 500 MHz threshold.
	 */
	if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);

	return 0;
}

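/*
 * Only exact table rates are accepted here; callers are expected to go
 * through round_rate() first, which picks one of them.
 */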
static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	int i;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			return clk->pll_devdata->update_rate(clk, rate,
							     &pll_cfg[i]);
		}
	}

	dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
		parent_rate);

	return -EINVAL;
}

static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};

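/*
 * Platform-driver probe path, used for the PLLs that are not needed
 * before the driver model is up (the system and HDMI PLLs from the
 * match table below). The early-boot core PLL is instead registered
 * via CLK_OF_DECLARE() at the bottom of this file.
 */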
static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *mem;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };
	struct device *dev = &pdev->dev;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pll_clk->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(pll_clk->regs))
		return PTR_ERR(pll_clk->regs);

	init.name = dev->of_node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(dev->of_node);
	if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
		dev_err(dev, "wrong number of clock parents: %u\n", num_parents);
		return -EINVAL;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_devdata = of_device_get_match_data(dev);

	if (!pll_clk->pll_devdata) {
		dev_err(dev, "no OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
				      &pll_clk->hw);
}

static int hsdk_pll_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

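/*
 * Early registration path for the core PLL. It maps two reg regions:
 * the PLL registers and the CREG interface-clock divider word, so a
 * node shaped roughly like the following is assumed (addresses and
 * labels hypothetical):
 *
 *	core_clk: core-clk@0 {
 *		compatible = "snps,hsdk-core-pll-clock";
 *		reg = <0x00 0x10>, <0x14b8 0x4>;
 *		#clock-cells = <0>;
 *		clocks = <&input_clk>;
 *	};
 */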
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->regs = of_iomap(node, 0);
	if (!pll_clk->regs) {
		pr_err("failed to map pll registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->spec_regs = of_iomap(node, 1);
	if (!pll_clk->spec_regs) {
		pr_err("failed to map pll special registers\n");
		goto err_unmap_comm_regs;
	}

	init.name = node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(node);
	if (num_parents > CGU_PLL_SOURCE_MAX) {
		pr_err("too many clock parents: %u\n", num_parents);
		goto err_unmap_spec_regs;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->pll_devdata = &core_pll_devdata;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	return;

err_unmap_spec_regs:
	iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
	iounmap(pll_clk->regs);
err_free_pll_clk:
	kfree(pll_clk);
}

/* The core PLL is needed early for the ARC CPU timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
	       of_hsdk_pll_clk_setup);

static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata },
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata },
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
	.remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);
439builtin_platform_driver(hsdk_pll_clk_driver);