// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5
#define MAX_PLL_DIV	16

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[MAX_PLL_DIV];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * cmux freq must be >= platform pll.
 * If not set, cmux freq must be >= platform pll/2
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

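/*
 * Register accessor helpers: the clockgen block is big-endian on most
 * chips, but parts flagged CG_LITTLE_ENDIAN in chipinfo[] below expose
 * little-endian registers, so all register I/O goes through these
 * wrappers.
 */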
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

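/*
 * The tables below describe, per SoC family, which PLL/divider feeds
 * each selectable input of a mux.  The array index is the raw clock
 * select value programmed into the mux register; entries without
 * CLKSEL_VALID (including the empty {} slots) are selector values that
 * must not be used.
 */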
static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa2 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa3 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa4 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo ls1088a_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1088a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000
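
/*
 * The init_periph callbacks below derive the FMan clock parents from
 * RCWSR[7] in the guts (device-config) block; the bits above correspond
 * to the FM clock select and async divider fields of the reset
 * configuration word.
 */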

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

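/*
 * Per-chip description table, matched against the clockgen node's
 * compatible string in clockgen_init().  cmux_to_group maps each
 * core-cluster mux to an entry in cmux_groups and is -1 terminated.
 */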
static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1028a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&ls1028a_hwa1, &ls1028a_hwa2,
			&ls1028a_hwa3, &ls1028a_hwa4
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1088a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&ls1088a_hwa1, &ls1088a_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,lx2160a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p5020_cmux_grp1, &p5020_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27
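
/*
 * In each mux control register the clock-select field occupies the four
 * bits covered by CLKSEL_MASK (bits 30:27 of the value read through
 * cg_in()); mux_get_parent()/mux_set_parent() translate between this
 * raw field and the clk framework parent index using the lookup tables
 * built in create_mux_common().
 */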

static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

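	/*
	 * Walk all possible selector values: skip entries that are not
	 * valid for this mux or whose rate falls outside the allowed
	 * window, and build the parent list plus the two-way
	 * parent-index <-> clksel mappings used by the mux ops.
	 */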
	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

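	/*
	 * Per-cluster core mux control registers sit at a 0x20 stride;
	 * on clockgen version 3 (CG_VER3) parts the whole block moves
	 * to offset 0x70000.
	 */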
	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

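	/*
	 * Hardware-accelerator mux control registers are interleaved
	 * with the core muxes at a 0x20 stride, offset 0x10 into each
	 * slot.
	 */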
	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/*
 * Legacy nodes may get probed before the parent clockgen node.
 * It is assumed that device trees with legacy nodes will not
 * contain a "clocks" property -- otherwise the input clocks may
 * not be initialized at this point.
 */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

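	/*
	 * Legacy core-mux nodes are addressed by register offset; the
	 * muxes are spaced 0x20 apart, so recover the cmux index from
	 * bits 7:5 of the node's address.
	 */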
	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk __init *input_clock(const char *name, struct clk *clk)
{
	const char *input_name;

	/* Register the input clock under the desired name. */
	input_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, input_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk __init *input_clock_by_name(const char *name,
					      const char *dtname)
{
	struct clk *clk;

	clk = of_clk_get_by_name(clockgen.node, dtname);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk __init *input_clock_by_index(const char *name, int idx)
{
	struct clk *clk;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

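/*
 * The SoC input clock can be described in several ways, tried in order:
 * a "clock-frequency" property on the clockgen node, a "clocks" input
 * named "sysclk", the first unnamed "clocks" input, or (for legacy
 * device trees) a fixed-rate "sysclk" child node.
 */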
static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}

static struct clk * __init create_coreclk(const char *name)
{
	struct clk *clk;

	clk = input_clock_by_name(name, "coreclk");
	if (!IS_ERR(clk))
		return clk;

	/*
	 * This indicates a mix of legacy nodes with the new coreclk
	 * mechanism, which should never happen.  If this error occurs,
	 * don't use the wrong input clock just because coreclk isn't
	 * ready yet.
	 */
	if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
		return clk;

	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

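	/*
	 * Locate this PLL's control/status register.  Clockgen version 3
	 * places the platform PLL at 0x60080 and the cluster group B
	 * PLLs in the 0x10000 region, while older clockgens keep the
	 * core PLLs at 0x800 + 0x20 * n and the platform PLL at 0xc00.
	 */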
	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;
		int ret;

		/*
		 * For platform PLL, there are MAX_PLL_DIV divider clocks.
		 * For core PLL, there are 4 divider clocks at most.
		 */
		if (idx != PLATFORM_PLL && i >= 4)
			break;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
		ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
		if (ret != 0)
			pr_err("%s: %s: register to lookup table failed %d\n",
			       __func__, pll->div[i].name, ret);

	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

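	/*
	 * Legacy bindings name either three or four divider outputs.
	 * With only three, the last named output is the /4 divider, so
	 * div[2] (the /3 clock) is skipped.
	 */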
	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

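/*
 * Clock provider callback.  Consumers use a two-cell specifier
 * <type index>, where type selects the clock class handled in the
 * switch below: 0 = sysclk, 1 = core-cluster mux, 2 = hwaccel mux,
 * 3 = FMan, 4 = platform PLL divider, 5 = coreclk.  For example, a
 * hypothetical consumer node could reference the second core-cluster
 * mux with "clocks = <&clockgen 1 1>;".
 */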
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	case 5:
		if (idx != 0)
			goto bad_args;
		clk = cg->coreclk;
		if (IS_ERR(clk))
			clk = NULL;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

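/*
 * The a4510_svrs[] list above identifies silicon revisions affected by
 * what is presumably Freescale erratum A-004510.  On those parts
 * clockgen_init() sets CG_CMUX_GE_PLAT, which keeps every core-cluster
 * mux at or above the platform PLL rate.
 */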
static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %pOF regs\n", __func__,
				       guts);
			}
			of_node_put(guts);
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);