// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5
#define MAX_PLL_DIV	32

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[MAX_PLL_DIV];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If CG_CMUX_GE_PLAT is set, the cmux frequency must be >= the platform
 * PLL frequency; otherwise it must be >= half the platform PLL frequency.
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;
static bool add_cpufreq_dev __initdata;

static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};


static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1021a_cmux = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
	}
};

static const struct clockgen_muxinfo ls1028a_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa2 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa3 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa4 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo ls1088a_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1088a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&ls1021a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1028a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&ls1028a_hwa1, &ls1028a_hwa2,
			&ls1028a_hwa3, &ls1028a_hwa4
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1088a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&ls1088a_hwa1, &ls1088a_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,lx2160a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p5020_cmux_grp1, &p5020_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

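/*
 * A mux clock backed by a single clockgen mux control register.  Not every
 * clksel value maps to a usable parent, so parent_to_clksel[] and
 * clksel_to_parent[] translate between the sparse hardware clksel encoding
 * and the dense parent index used by the clk framework.
 */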
struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init _clockgen_init(struct device_node *np, bool legacy);

/*
 * Legacy nodes may get probed before the parent clockgen node.
 * It is assumed that device trees with legacy nodes will not
 * contain a "clocks" property -- otherwise the input clocks may
 * not be initialized at this point.
 */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		_clockgen_init(of_get_parent(np), true);
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk __init *input_clock(const char *name, struct clk *clk)
{
	const char *input_name;

	/* Register the input clock under the desired name. */
	input_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, input_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk __init *input_clock_by_name(const char *name,
					      const char *dtname)
{
	struct clk *clk;

	clk = of_clk_get_by_name(clockgen.node, dtname);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk __init *input_clock_by_index(const char *name, int idx)
{
	struct clk *clk;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}

static struct clk * __init create_coreclk(const char *name)
{
	struct clk *clk;

	clk = input_clock_by_name(name, "coreclk");
	if (!IS_ERR(clk))
		return clk;

	/*
	 * This indicates a mix of legacy nodes with the new coreclk
	 * mechanism, which should never happen.  If this error occurs,
	 * don't use the wrong input clock just because coreclk isn't
	 * ready yet.
	 */
	if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
		return clk;

	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the PLL multiplier */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;
		int ret;

		/*
		 * For platform PLL, there are MAX_PLL_DIV divider clocks.
		 * For core PLL, there are 4 divider clocks at most.
		 */
		if (idx != PLATFORM_PLL && i >= 4)
			break;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
		ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
		if (ret != 0)
			pr_err("%s: %s: register to lookup table failed %d\n",
			       __func__, pll->div[i].name, ret);

	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

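	/*
	 * Legacy PLL nodes name either three or four output clocks.  With
	 * three outputs the exported dividers are /1, /2 and /4 (div[0],
	 * div[1] and div[3]); with four they are /1 through /4.
	 */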
	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

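/*
 * Clock provider callback for the clockgen node's two-cell specifier
 * <type index>: type 0 = sysclk, 1 = cmux, 2 = hwaccel, 3 = fman,
 * 4 = platform PLL divider, 5 = coreclk; index selects the instance
 * (or divider) within that type.
 */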
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	case 5:
		if (idx != 0)
			goto bad_args;
		clk = cg->coreclk;
		if (IS_ERR(clk))
			clk = NULL;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init _clockgen_init(struct device_node *np, bool legacy)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %pOF regs\n", __func__,
				       guts);
			}
			of_node_put(guts);
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, ret);
	}

	/* Don't create cpufreq device for legacy clockgen blocks */
	add_cpufreq_dev = !legacy;

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

static void __init clockgen_init(struct device_node *np)
{
	_clockgen_init(np, false);
}

static int __init clockgen_cpufreq_init(void)
{
	struct platform_device *pdev;

	if (add_cpufreq_dev) {
		pdev = platform_device_register_simple("qoriq-cpufreq", -1,
				NULL, 0);
		if (IS_ERR(pdev))
			pr_err("Couldn't register qoriq-cpufreq err=%ld\n",
				PTR_ERR(pdev));
	}
	return 0;
}
device_initcall(clockgen_cpufreq_init);

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);