blob: dd93d3acc67d8228df65519ff8ae15b6b0dbc9f8 [file] [log] [blame]
Thomas Gleixnerd2912cb2019-06-04 10:11:33 +02001// SPDX-License-Identifier: GPL-2.0-only
Tang Yuantian555eae92013-04-09 16:46:26 +08002/*
3 * Copyright 2013 Freescale Semiconductor, Inc.
4 *
Tang Yuantian93a17c02015-01-15 14:03:41 +08005 * clock driver for Freescale QorIQ SoCs.
Tang Yuantian555eae92013-04-09 16:46:26 +08006 */
Emil Medvec88b2b62015-01-21 04:03:29 -06007
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
Scott Wood0dfc86b2015-09-19 23:29:54 -050010#include <linux/clk.h>
Tang Yuantian555eae92013-04-09 16:46:26 +080011#include <linux/clk-provider.h>
Yuantian Tang45899dc2017-04-06 10:21:23 +080012#include <linux/clkdev.h>
Scott Wood0dfc86b2015-09-19 23:29:54 -050013#include <linux/fsl/guts.h>
Tang Yuantian555eae92013-04-09 16:46:26 +080014#include <linux/io.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
Rob Herringc11eede2013-11-10 23:19:08 -060017#include <linux/of_address.h>
Tang Yuantian555eae92013-04-09 16:46:26 +080018#include <linux/of_platform.h>
19#include <linux/of.h>
20#include <linux/slab.h>
21
Scott Wood0dfc86b2015-09-19 23:29:54 -050022#define PLL_DIV1 0
23#define PLL_DIV2 1
24#define PLL_DIV3 2
25#define PLL_DIV4 3
26
27#define PLATFORM_PLL 0
28#define CGA_PLL1 1
29#define CGA_PLL2 2
30#define CGA_PLL3 3
31#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
32#define CGB_PLL1 4
33#define CGB_PLL2 5
Yuantian Tangcc61ab92019-04-22 17:15:09 +080034#define MAX_PLL_DIV 16
Scott Wood0dfc86b2015-09-19 23:29:54 -050035
/* One PLL output divider: its registered clk and the name it was given. */
struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

/* All output dividers belonging to a single PLL. */
struct clockgen_pll {
	struct clockgen_pll_div div[MAX_PLL_DIV];
};
Tang Yuantian555eae92013-04-09 16:46:26 +080044
Scott Wood0dfc86b2015-09-19 23:29:54 -050045#define CLKSEL_VALID 1
46#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */
47
/* One selectable mux input: which PLL divider it taps, plus CLKSEL_xxx flags. */
struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS 16

/* The full clksel table for one mux register; sparse entries are invalid. */
struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};
59
60#define NUM_HWACCEL 5
61#define NUM_CMUX 8
62
63struct clockgen;
64
65/*
66 * cmux freq must be >= platform pll.
67 * If not set, cmux freq must be >= platform pll/2
68 */
69#define CG_CMUX_GE_PLAT 1
Scott Wood9e19ca22015-09-19 23:29:55 -050070
Scott Wood0dfc86b2015-09-19 23:29:54 -050071#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */
Scott Wood9e19ca22015-09-19 23:29:55 -050072#define CG_VER3 4 /* version 3 cg: reg layout different */
73#define CG_LITTLE_ENDIAN 8
Scott Wood0dfc86b2015-09-19 23:29:54 -050074
/* Static per-SoC description of the clockgen block, matched by compatible. */
struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);	/* hook up fman clocks etc. */
	int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

/* Runtime state for the (single) clockgen instance. */
struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};
96
97static struct clockgen clockgen;
98
Scott Wood9e19ca22015-09-19 23:29:55 -050099static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
100{
101 if (cg->info.flags & CG_LITTLE_ENDIAN)
102 iowrite32(val, reg);
103 else
104 iowrite32be(val, reg);
105}
106
107static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
108{
109 u32 val;
110
111 if (cg->info.flags & CG_LITTLE_ENDIAN)
112 val = ioread32(reg);
113 else
114 val = ioread32be(reg);
115
116 return val;
117}
118
Scott Wood0dfc86b2015-09-19 23:29:54 -0500119static const struct clockgen_muxinfo p2041_cmux_grp1 = {
120 {
121 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
122 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
123 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
124 }
125};
126
127static const struct clockgen_muxinfo p2041_cmux_grp2 = {
128 {
129 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
Scott Wood2c7693e2015-10-22 23:21:46 -0500130 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
131 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
Scott Wood0dfc86b2015-09-19 23:29:54 -0500132 }
133};
134
135static const struct clockgen_muxinfo p5020_cmux_grp1 = {
136 {
137 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
138 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
139 [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
140 }
141};
142
143static const struct clockgen_muxinfo p5020_cmux_grp2 = {
144 {
145 [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
146 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
147 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
148 }
149};
150
151static const struct clockgen_muxinfo p5040_cmux_grp1 = {
152 {
153 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
154 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
155 [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
156 [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
157 }
158};
159
160static const struct clockgen_muxinfo p5040_cmux_grp2 = {
161 {
162 [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
163 [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
164 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
165 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
166 }
167};
168
169static const struct clockgen_muxinfo p4080_cmux_grp1 = {
170 {
171 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
172 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
173 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
174 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
175 [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
176 }
177};
178
179static const struct clockgen_muxinfo p4080_cmux_grp2 = {
180 {
181 [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
182 [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
183 [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
184 [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
185 [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
186 }
187};
188
189static const struct clockgen_muxinfo t1023_cmux = {
190 {
191 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
192 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
193 }
194};
195
196static const struct clockgen_muxinfo t1040_cmux = {
197 {
198 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
199 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
200 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
201 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
202 }
203};
204
205
206static const struct clockgen_muxinfo clockgen2_cmux_cga = {
207 {
208 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
209 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
210 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
211 {},
212 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
213 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
214 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
215 {},
216 { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
217 { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
218 { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
219 },
220};
221
222static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
223 {
224 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
225 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
226 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
227 {},
228 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
229 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
230 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
231 },
232};
233
234static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
235 {
236 { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
237 { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
238 { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
239 {},
240 { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
241 { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
242 { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
243 },
244};
245
Yuantian Tang95089f62019-04-24 09:19:12 +0800246static const struct clockgen_muxinfo ls1028a_hwa1 = {
247 {
248 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
249 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
250 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
251 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
252 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
253 {},
254 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
255 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
256 },
257};
258
259static const struct clockgen_muxinfo ls1028a_hwa2 = {
260 {
261 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
262 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
263 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
264 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
265 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
266 {},
267 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
268 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
269 },
270};
271
272static const struct clockgen_muxinfo ls1028a_hwa3 = {
273 {
274 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
275 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
276 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
277 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
278 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
279 {},
280 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
281 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
282 },
283};
284
285static const struct clockgen_muxinfo ls1028a_hwa4 = {
286 {
287 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
288 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
289 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
290 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
291 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
292 {},
293 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
294 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
295 },
296};
297
Hou Zhiqiange994412c2015-10-23 16:01:21 +0800298static const struct clockgen_muxinfo ls1043a_hwa1 = {
299 {
300 {},
301 {},
302 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
303 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
304 {},
305 {},
306 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
307 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
308 },
309};
310
311static const struct clockgen_muxinfo ls1043a_hwa2 = {
312 {
313 {},
314 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
315 {},
316 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
317 },
318};
319
Mingkai Hu80e52192016-09-07 11:48:30 +0800320static const struct clockgen_muxinfo ls1046a_hwa1 = {
321 {
322 {},
323 {},
324 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
325 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
326 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
327 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
328 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
329 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
330 },
331};
332
333static const struct clockgen_muxinfo ls1046a_hwa2 = {
334 {
335 {},
336 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
337 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
338 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
339 {},
340 {},
341 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
342 },
343};
344
Tang Yuantian44709352016-11-24 10:36:55 +0800345static const struct clockgen_muxinfo ls1012a_cmux = {
346 {
347 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
348 {},
349 [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
350 }
351};
352
Scott Wood0dfc86b2015-09-19 23:29:54 -0500353static const struct clockgen_muxinfo t1023_hwa1 = {
354 {
355 {},
356 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
357 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
358 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
359 },
360};
361
362static const struct clockgen_muxinfo t1023_hwa2 = {
363 {
364 [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
365 },
366};
367
368static const struct clockgen_muxinfo t2080_hwa1 = {
369 {
370 {},
371 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
372 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
373 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
374 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
375 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
376 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
377 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
378 },
379};
380
381static const struct clockgen_muxinfo t2080_hwa2 = {
382 {
383 {},
384 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
385 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
386 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
387 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
388 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
389 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
390 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
391 },
392};
393
394static const struct clockgen_muxinfo t4240_hwa1 = {
395 {
396 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
397 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
398 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
399 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
400 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
401 {},
402 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
403 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
404 },
405};
406
407static const struct clockgen_muxinfo t4240_hwa4 = {
408 {
409 [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
410 [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
411 [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
412 [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
413 [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
414 },
415};
416
417static const struct clockgen_muxinfo t4240_hwa5 = {
418 {
419 [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
420 [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
421 [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
422 [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
423 [6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
424 [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
425 },
426};
427
428#define RCWSR7_FM1_CLK_SEL 0x40000000
429#define RCWSR7_FM2_CLK_SEL 0x20000000
430#define RCWSR7_HWA_ASYNC_DIV 0x04000000
431
432static void __init p2041_init_periph(struct clockgen *cg)
Tang Yuantian555eae92013-04-09 16:46:26 +0800433{
Scott Wood0dfc86b2015-09-19 23:29:54 -0500434 u32 reg;
435
436 reg = ioread32be(&cg->guts->rcwsr[7]);
437
438 if (reg & RCWSR7_FM1_CLK_SEL)
439 cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
440 else
441 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
442}
443
444static void __init p4080_init_periph(struct clockgen *cg)
445{
446 u32 reg;
447
448 reg = ioread32be(&cg->guts->rcwsr[7]);
449
450 if (reg & RCWSR7_FM1_CLK_SEL)
451 cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
452 else
453 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
454
455 if (reg & RCWSR7_FM2_CLK_SEL)
456 cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
457 else
458 cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
459}
460
461static void __init p5020_init_periph(struct clockgen *cg)
462{
463 u32 reg;
464 int div = PLL_DIV2;
465
466 reg = ioread32be(&cg->guts->rcwsr[7]);
467 if (reg & RCWSR7_HWA_ASYNC_DIV)
468 div = PLL_DIV4;
469
470 if (reg & RCWSR7_FM1_CLK_SEL)
471 cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
472 else
473 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
474}
475
476static void __init p5040_init_periph(struct clockgen *cg)
477{
478 u32 reg;
479 int div = PLL_DIV2;
480
481 reg = ioread32be(&cg->guts->rcwsr[7]);
482 if (reg & RCWSR7_HWA_ASYNC_DIV)
483 div = PLL_DIV4;
484
485 if (reg & RCWSR7_FM1_CLK_SEL)
486 cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
487 else
488 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
489
490 if (reg & RCWSR7_FM2_CLK_SEL)
491 cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
492 else
493 cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
494}
495
/* t1023: FMan is fed from hwaccel mux 1. */
static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}
500
/* t1040: FMan runs from the undivided platform PLL. */
static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}
505
/* t2080: FMan is fed from hwaccel mux 0. */
static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}
510
/* t4240: the two FMans are fed from hwaccel muxes 3 and 4. */
static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}
516
517static const struct clockgen_chipinfo chipinfo[] = {
518 {
519 .compat = "fsl,b4420-clockgen",
520 .guts_compat = "fsl,b4860-device-config",
521 .init_periph = t2080_init_periph,
522 .cmux_groups = {
523 &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
524 },
525 .hwaccel = {
526 &t2080_hwa1
527 },
528 .cmux_to_group = {
529 0, 1, 1, 1, -1
530 },
531 .pll_mask = 0x3f,
532 .flags = CG_PLL_8BIT,
533 },
534 {
535 .compat = "fsl,b4860-clockgen",
536 .guts_compat = "fsl,b4860-device-config",
537 .init_periph = t2080_init_periph,
538 .cmux_groups = {
539 &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
540 },
541 .hwaccel = {
542 &t2080_hwa1
543 },
544 .cmux_to_group = {
545 0, 1, 1, 1, -1
546 },
547 .pll_mask = 0x3f,
548 .flags = CG_PLL_8BIT,
549 },
550 {
551 .compat = "fsl,ls1021a-clockgen",
552 .cmux_groups = {
553 &t1023_cmux
554 },
555 .cmux_to_group = {
556 0, -1
557 },
558 .pll_mask = 0x03,
559 },
560 {
Yuantian Tang95089f62019-04-24 09:19:12 +0800561 .compat = "fsl,ls1028a-clockgen",
562 .cmux_groups = {
563 &clockgen2_cmux_cga12
564 },
565 .hwaccel = {
566 &ls1028a_hwa1, &ls1028a_hwa2,
567 &ls1028a_hwa3, &ls1028a_hwa4
568 },
569 .cmux_to_group = {
570 0, 0, 0, 0, -1
571 },
572 .pll_mask = 0x07,
573 .flags = CG_VER3 | CG_LITTLE_ENDIAN,
574 },
575 {
Hou Zhiqiange994412c2015-10-23 16:01:21 +0800576 .compat = "fsl,ls1043a-clockgen",
577 .init_periph = t2080_init_periph,
578 .cmux_groups = {
579 &t1040_cmux
580 },
581 .hwaccel = {
582 &ls1043a_hwa1, &ls1043a_hwa2
583 },
584 .cmux_to_group = {
585 0, -1
586 },
587 .pll_mask = 0x07,
588 .flags = CG_PLL_8BIT,
589 },
590 {
Mingkai Hu80e52192016-09-07 11:48:30 +0800591 .compat = "fsl,ls1046a-clockgen",
592 .init_periph = t2080_init_periph,
593 .cmux_groups = {
594 &t1040_cmux
595 },
596 .hwaccel = {
597 &ls1046a_hwa1, &ls1046a_hwa2
598 },
599 .cmux_to_group = {
600 0, -1
601 },
602 .pll_mask = 0x07,
603 .flags = CG_PLL_8BIT,
604 },
605 {
Yuantian Tange0c888c42017-04-06 10:21:22 +0800606 .compat = "fsl,ls1088a-clockgen",
607 .cmux_groups = {
608 &clockgen2_cmux_cga12
609 },
610 .cmux_to_group = {
611 0, 0, -1
612 },
613 .pll_mask = 0x07,
614 .flags = CG_VER3 | CG_LITTLE_ENDIAN,
615 },
616 {
Tang Yuantian44709352016-11-24 10:36:55 +0800617 .compat = "fsl,ls1012a-clockgen",
618 .cmux_groups = {
619 &ls1012a_cmux
620 },
621 .cmux_to_group = {
622 0, -1
623 },
624 .pll_mask = 0x03,
625 },
626 {
Scott Wood9e19ca22015-09-19 23:29:55 -0500627 .compat = "fsl,ls2080a-clockgen",
628 .cmux_groups = {
629 &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
630 },
631 .cmux_to_group = {
632 0, 0, 1, 1, -1
633 },
634 .pll_mask = 0x37,
635 .flags = CG_VER3 | CG_LITTLE_ENDIAN,
636 },
637 {
Scott Wood0dfc86b2015-09-19 23:29:54 -0500638 .compat = "fsl,p2041-clockgen",
639 .guts_compat = "fsl,qoriq-device-config-1.0",
640 .init_periph = p2041_init_periph,
641 .cmux_groups = {
642 &p2041_cmux_grp1, &p2041_cmux_grp2
643 },
644 .cmux_to_group = {
645 0, 0, 1, 1, -1
646 },
647 .pll_mask = 0x07,
648 },
649 {
650 .compat = "fsl,p3041-clockgen",
651 .guts_compat = "fsl,qoriq-device-config-1.0",
652 .init_periph = p2041_init_periph,
653 .cmux_groups = {
654 &p2041_cmux_grp1, &p2041_cmux_grp2
655 },
656 .cmux_to_group = {
657 0, 0, 1, 1, -1
658 },
659 .pll_mask = 0x07,
660 },
661 {
662 .compat = "fsl,p4080-clockgen",
663 .guts_compat = "fsl,qoriq-device-config-1.0",
664 .init_periph = p4080_init_periph,
665 .cmux_groups = {
666 &p4080_cmux_grp1, &p4080_cmux_grp2
667 },
668 .cmux_to_group = {
Yogesh Gaur42614b52019-04-25 09:47:48 +0000669 0, 0, 0, 0, 1, 1, 1, 1, -1
Scott Wood0dfc86b2015-09-19 23:29:54 -0500670 },
671 .pll_mask = 0x1f,
672 },
673 {
674 .compat = "fsl,p5020-clockgen",
675 .guts_compat = "fsl,qoriq-device-config-1.0",
676 .init_periph = p5020_init_periph,
677 .cmux_groups = {
678 &p2041_cmux_grp1, &p2041_cmux_grp2
679 },
680 .cmux_to_group = {
681 0, 1, -1
682 },
683 .pll_mask = 0x07,
684 },
685 {
686 .compat = "fsl,p5040-clockgen",
687 .guts_compat = "fsl,p5040-device-config",
688 .init_periph = p5040_init_periph,
689 .cmux_groups = {
690 &p5040_cmux_grp1, &p5040_cmux_grp2
691 },
692 .cmux_to_group = {
693 0, 0, 1, 1, -1
694 },
695 .pll_mask = 0x0f,
696 },
697 {
698 .compat = "fsl,t1023-clockgen",
699 .guts_compat = "fsl,t1023-device-config",
700 .init_periph = t1023_init_periph,
701 .cmux_groups = {
702 &t1023_cmux
703 },
704 .hwaccel = {
705 &t1023_hwa1, &t1023_hwa2
706 },
707 .cmux_to_group = {
708 0, 0, -1
709 },
710 .pll_mask = 0x03,
711 .flags = CG_PLL_8BIT,
712 },
713 {
714 .compat = "fsl,t1040-clockgen",
715 .guts_compat = "fsl,t1040-device-config",
716 .init_periph = t1040_init_periph,
717 .cmux_groups = {
718 &t1040_cmux
719 },
720 .cmux_to_group = {
721 0, 0, 0, 0, -1
722 },
723 .pll_mask = 0x07,
724 .flags = CG_PLL_8BIT,
725 },
726 {
727 .compat = "fsl,t2080-clockgen",
728 .guts_compat = "fsl,t2080-device-config",
729 .init_periph = t2080_init_periph,
730 .cmux_groups = {
731 &clockgen2_cmux_cga12
732 },
733 .hwaccel = {
734 &t2080_hwa1, &t2080_hwa2
735 },
736 .cmux_to_group = {
737 0, -1
738 },
739 .pll_mask = 0x07,
740 .flags = CG_PLL_8BIT,
741 },
742 {
743 .compat = "fsl,t4240-clockgen",
744 .guts_compat = "fsl,t4240-device-config",
745 .init_periph = t4240_init_periph,
746 .cmux_groups = {
747 &clockgen2_cmux_cga, &clockgen2_cmux_cgb
748 },
749 .hwaccel = {
750 &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
751 },
752 .cmux_to_group = {
753 0, 0, 1, -1
754 },
755 .pll_mask = 0x3f,
756 .flags = CG_PLL_8BIT,
757 },
758 {},
759};
760
/* Runtime state for one mux clock (cmux or hwaccel). */
struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];	/* parent index -> clksel value */
	s8 clksel_to_parent[NUM_MUX_PARENTS];	/* clksel value -> parent index, -1 if unusable */
	int num_parents;
};
770
771#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw)
772#define CLKSEL_MASK 0x78000000
773#define CLKSEL_SHIFT 27
774
775static int mux_set_parent(struct clk_hw *hw, u8 idx)
776{
777 struct mux_hwclock *hwc = to_mux_hwclock(hw);
Tang Yuantian555eae92013-04-09 16:46:26 +0800778 u32 clksel;
779
Scott Wood0dfc86b2015-09-19 23:29:54 -0500780 if (idx >= hwc->num_parents)
781 return -EINVAL;
782
783 clksel = hwc->parent_to_clksel[idx];
Scott Wood9e19ca22015-09-19 23:29:55 -0500784 cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);
Tang Yuantian555eae92013-04-09 16:46:26 +0800785
786 return 0;
787}
788
Scott Wood0dfc86b2015-09-19 23:29:54 -0500789static u8 mux_get_parent(struct clk_hw *hw)
Tang Yuantian555eae92013-04-09 16:46:26 +0800790{
Scott Wood0dfc86b2015-09-19 23:29:54 -0500791 struct mux_hwclock *hwc = to_mux_hwclock(hw);
Tang Yuantian555eae92013-04-09 16:46:26 +0800792 u32 clksel;
Scott Wood0dfc86b2015-09-19 23:29:54 -0500793 s8 ret;
Tang Yuantian555eae92013-04-09 16:46:26 +0800794
Scott Wood9e19ca22015-09-19 23:29:55 -0500795 clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
Tang Yuantian555eae92013-04-09 16:46:26 +0800796
Scott Wood0dfc86b2015-09-19 23:29:54 -0500797 ret = hwc->clksel_to_parent[clksel];
798 if (ret < 0) {
799 pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
800 return 0;
801 }
802
803 return ret;
Tang Yuantian555eae92013-04-09 16:46:26 +0800804}
805
Emil Medve334680d2015-01-21 04:03:27 -0600806static const struct clk_ops cmux_ops = {
Scott Wood0dfc86b2015-09-19 23:29:54 -0500807 .get_parent = mux_get_parent,
808 .set_parent = mux_set_parent,
Tang Yuantian555eae92013-04-09 16:46:26 +0800809};
810
Scott Wood0dfc86b2015-09-19 23:29:54 -0500811/*
812 * Don't allow setting for now, as the clock options haven't been
813 * sanitized for additional restrictions.
814 */
815static const struct clk_ops hwaccel_ops = {
816 .get_parent = mux_get_parent,
817};
818
819static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
820 struct mux_hwclock *hwc,
821 int idx)
822{
823 int pll, div;
824
825 if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
826 return NULL;
827
828 pll = hwc->info->clksel[idx].pll;
829 div = hwc->info->clksel[idx].div;
830
831 return &cg->pll[pll].div[div];
832}
833
/*
 * Register a mux clock named per @fmt/@idx whose parents are the PLL
 * dividers listed in @hwc->info.  Entries outside [min_rate, max_rate],
 * or CLKSEL_80PCT-restricted entries above @pct80_rate, are excluded.
 * Takes ownership of @hwc; frees it and returns NULL on failure.
 */
static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	/*
	 * Build the compact parent list (j counts accepted parents) and the
	 * two-way clksel<->parent maps used by the get/set_parent ops.
	 */
	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;	/* default: not a usable parent */

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}
894
/*
 * Create core-cluster mux @idx.  The rate window passed to
 * create_mux_common() is derived from the boot-time (default) clksel and
 * the platform PLL rate.  Returns NULL on any failure.
 */
static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	/* Version 3 clockgen keeps the cmux registers at offset 0x70000. */
	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency. If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	/* 80% of max_rate, computed in 64 bits to avoid overflow. */
	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	/* See the CG_CMUX_GE_PLAT comment near its definition. */
	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}
942
943static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
944{
945 struct mux_hwclock *hwc;
946
947 hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
948 if (!hwc)
949 return NULL;
950
951 hwc->reg = cg->regs + 0x20 * idx + 0x10;
952 hwc->info = cg->info.hwaccel[idx];
953
Scott Wood7c1c5412016-10-17 13:42:23 -0500954 return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
Scott Wood0dfc86b2015-09-19 23:29:54 -0500955 "cg-hwaccel%d", idx);
956}
957
958static void __init create_muxes(struct clockgen *cg)
959{
960 int i;
961
962 for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
963 if (cg->info.cmux_to_group[i] < 0)
964 break;
965 if (cg->info.cmux_to_group[i] >=
966 ARRAY_SIZE(cg->info.cmux_groups)) {
967 WARN_ON_ONCE(1);
968 continue;
969 }
970
971 cg->cmux[i] = create_one_cmux(cg, i);
972 }
973
974 for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
975 if (!cg->info.hwaccel[i])
976 continue;
977
978 cg->hwaccel[i] = create_one_hwaccel(cg, i);
979 }
980}
981
982static void __init clockgen_init(struct device_node *np);
983
Scott Wood80b4ae72017-03-20 10:37:23 +0800984/*
985 * Legacy nodes may get probed before the parent clockgen node.
986 * It is assumed that device trees with legacy nodes will not
987 * contain a "clocks" property -- otherwise the input clocks may
988 * not be initialized at this point.
989 */
Scott Wood0dfc86b2015-09-19 23:29:54 -0500990static void __init legacy_init_clockgen(struct device_node *np)
991{
992 if (!clockgen.node)
993 clockgen_init(of_get_parent(np));
994}
995
996/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	/*
	 * cmux registers are spaced 0x20 apart, so the mux index can be
	 * recovered from bits 5..7 of the legacy node's register address.
	 */
	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		return;
	}
}
1018
Julia Lawall3432a2e2016-04-18 16:55:34 +02001019static struct clk __init
1020*sysclk_from_fixed(struct device_node *node, const char *name)
Tang Yuantian555eae92013-04-09 16:46:26 +08001021{
Scott Wood0dfc86b2015-09-19 23:29:54 -05001022 u32 rate;
Tang Yuantian555eae92013-04-09 16:46:26 +08001023
Scott Wood0dfc86b2015-09-19 23:29:54 -05001024 if (of_property_read_u32(node, "clock-frequency", &rate))
1025 return ERR_PTR(-ENODEV);
1026
Stephen Boydec3f2fc2016-03-01 11:00:19 -08001027 return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
Scott Wood0dfc86b2015-09-19 23:29:54 -05001028}
1029
Scott Wood80b4ae72017-03-20 10:37:23 +08001030static struct clk __init *input_clock(const char *name, struct clk *clk)
Scott Wood0dfc86b2015-09-19 23:29:54 -05001031{
Scott Wood80b4ae72017-03-20 10:37:23 +08001032 const char *input_name;
Scott Wood0dfc86b2015-09-19 23:29:54 -05001033
1034 /* Register the input clock under the desired name. */
Scott Wood80b4ae72017-03-20 10:37:23 +08001035 input_name = __clk_get_name(clk);
1036 clk = clk_register_fixed_factor(NULL, name, input_name,
Scott Wood0dfc86b2015-09-19 23:29:54 -05001037 0, 1, 1);
1038 if (IS_ERR(clk))
1039 pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
1040 PTR_ERR(clk));
1041
1042 return clk;
1043}
1044
Scott Wood80b4ae72017-03-20 10:37:23 +08001045static struct clk __init *input_clock_by_name(const char *name,
1046 const char *dtname)
1047{
1048 struct clk *clk;
1049
1050 clk = of_clk_get_by_name(clockgen.node, dtname);
1051 if (IS_ERR(clk))
1052 return clk;
1053
1054 return input_clock(name, clk);
1055}
1056
1057static struct clk __init *input_clock_by_index(const char *name, int idx)
1058{
1059 struct clk *clk;
1060
1061 clk = of_clk_get(clockgen.node, 0);
1062 if (IS_ERR(clk))
1063 return clk;
1064
1065 return input_clock(name, clk);
1066}
1067
/*
 * Create the "cg-sysclk" input, trying sources in priority order:
 * a clock-frequency property on the clockgen node, a "sysclk" entry in
 * the clocks property, the first clocks entry, and finally a legacy
 * "sysclk" child node.  Returns NULL if none are present.
 */
static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	/* Legacy layout: a "sysclk" child node carrying clock-frequency. */
	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}
1095
1096static struct clk * __init create_coreclk(const char *name)
1097{
1098 struct clk *clk;
1099
1100 clk = input_clock_by_name(name, "coreclk");
1101 if (!IS_ERR(clk))
1102 return clk;
1103
1104 /*
1105 * This indicates a mix of legacy nodes with the new coreclk
1106 * mechanism, which should never happen. If this error occurs,
1107 * don't use the wrong input clock just because coreclk isn't
1108 * ready yet.
1109 */
1110 if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
1111 return clk;
1112
Scott Wood0dfc86b2015-09-19 23:29:54 -05001113 return NULL;
1114}
1115
1116/* Legacy node */
1117static void __init sysclk_init(struct device_node *node)
1118{
1119 struct clk *clk;
1120
1121 legacy_init_clockgen(node);
1122
1123 clk = clockgen.sysclk;
1124 if (clk)
1125 of_clk_add_provider(node, of_clk_src_simple_get, clk);
1126}
1127
/* Top bit of the PLL control register disables ("kills") the PLL. */
#define PLL_KILL BIT(31)

/*
 * Read the multiplier for PLL @idx from the clockgen register block and
 * register its divider clocks ("cg-pll%d-div%d") as fixed-factor
 * children of the appropriate input clock.  Silently does nothing when
 * the chip's pll_mask excludes this PLL or the hardware reports it
 * disabled.
 */
static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	/*
	 * When a separate core clock input exists, all non-platform PLLs
	 * derive from it instead of sysclk.  If coreclk registration
	 * failed earlier, skip these PLLs entirely.
	 */
	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

	/*
	 * Locate the PLL control register.  Offsets below are per the
	 * clockgen register maps of the respective SoC families —
	 * NOTE(review): taken as-is from the original code; verify
	 * against the chip reference manuals if touched.
	 */
	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	/*
	 * The multiplier field is 8 bits wide on VER3 chips (and on
	 * CG_PLL_8BIT chips for non-platform PLLs), 6 bits otherwise.
	 */
	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;
		int ret;

		/*
		 * For platform PLL, there are MAX_PLL_DIV divider clocks.
		 * For core PLL, there are 4 divider clocks at most.
		 */
		if (idx != PLATFORM_PLL && i >= 4)
			break;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		/* Divider i produces input * mult / (i + 1). */
		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
		/* Also expose via clkdev lookup; failure is non-fatal. */
		ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
		if (ret != 0)
			pr_err("%s: %s: register to lookup table failed %d\n",
			       __func__, pll->div[i].name, ret);

	}
}
Tang Yuantian555eae92013-04-09 16:46:26 +08001221
Scott Wood0dfc86b2015-09-19 23:29:54 -05001222static void __init create_plls(struct clockgen *cg)
1223{
1224 int i;
Tang Yuantian555eae92013-04-09 16:46:26 +08001225
Scott Wood0dfc86b2015-09-19 23:29:54 -05001226 for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
1227 create_one_pll(cg, i);
1228}
1229
/*
 * Legacy-binding PLL setup: publish the already-created divider clocks
 * of PLL @idx as a onecell provider on @np, with the cell layout
 * dictated by the number of "clock-output-names" entries.
 */
static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	/* At most 4 outputs are ever published through this legacy path. */
	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	/*
	 * Three-name nodes map cell 2 to div4 (div3 is skipped); nodes
	 * with four names map cells 0..3 to div1..div4 directly.
	 */
	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	/*
	 * NOTE(review): clk_num is taken straight from the property
	 * count while only 4 slots are allocated — presumably legacy
	 * device trees never list more than 4 names; verify.
	 */
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}
1278
/* Legacy node: platform-PLL flavour of legacy_pll_init(). */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}
Emil Medvea513b722015-01-21 04:03:31 -06001284
/*
 * Legacy node: core-PLL setup.  Determines which CGA PLL this node
 * describes from its register address and defers to legacy_pll_init().
 */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		/* CGA PLLs sit 0x20 apart, so bits [7:5] select the PLL. */
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}
1305
1306static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
1307{
1308 struct clockgen *cg = data;
1309 struct clk *clk;
1310 struct clockgen_pll *pll;
1311 u32 type, idx;
1312
1313 if (clkspec->args_count < 2) {
1314 pr_err("%s: insufficient phandle args\n", __func__);
1315 return ERR_PTR(-EINVAL);
1316 }
1317
1318 type = clkspec->args[0];
1319 idx = clkspec->args[1];
1320
1321 switch (type) {
1322 case 0:
1323 if (idx != 0)
1324 goto bad_args;
1325 clk = cg->sysclk;
1326 break;
1327 case 1:
1328 if (idx >= ARRAY_SIZE(cg->cmux))
1329 goto bad_args;
1330 clk = cg->cmux[idx];
1331 break;
1332 case 2:
1333 if (idx >= ARRAY_SIZE(cg->hwaccel))
1334 goto bad_args;
1335 clk = cg->hwaccel[idx];
1336 break;
1337 case 3:
1338 if (idx >= ARRAY_SIZE(cg->fman))
1339 goto bad_args;
1340 clk = cg->fman[idx];
1341 break;
1342 case 4:
1343 pll = &cg->pll[PLATFORM_PLL];
1344 if (idx >= ARRAY_SIZE(pll->div))
1345 goto bad_args;
1346 clk = pll->div[idx].clk;
1347 break;
Scott Wood80b4ae72017-03-20 10:37:23 +08001348 case 5:
1349 if (idx != 0)
1350 goto bad_args;
1351 clk = cg->coreclk;
1352 if (IS_ERR(clk))
1353 clk = NULL;
1354 break;
Scott Wood0dfc86b2015-09-19 23:29:54 -05001355 default:
1356 goto bad_args;
1357 }
1358
1359 if (!clk)
1360 return ERR_PTR(-ENOENT);
1361 return clk;
1362
1363bad_args:
1364 pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
1365 return ERR_PTR(-EINVAL);
1366}
1367
#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

/*
 * SVR (System Version Register) values, shifted to include the revision
 * byte, of the PowerPC parts affected by erratum A-004510.
 */
static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

/*
 * Return true when the running chip (per its SVR, with the security
 * variant bit masked off) is on the A-004510 erratum list above.
 */
static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
/* Non-PowerPC builds: the erratum only affects the PPC parts above. */
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif
1409
/*
 * Main probe for all clockgen compatibles: map the register block,
 * match the chip-specific table entry, create the input clocks, PLLs
 * and muxes, and register the unified clock provider on @np.
 */
static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np);
		return;
	}

	/*
	 * Find the chipinfo entry matching this node's compatible; the
	 * hacked-up old ls1021a trees match by chip name instead.
	 */
	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
		goto err;
	}
	clockgen.info = chipinfo[i];

	/* Map the global-utilities block if this chip declares one. */
	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %pOF regs\n", __func__,
				       guts);
			}
			of_node_put(guts);
		}

	}

	/* Erratum A-004510 workaround: keep CMUX at or above platform PLL. */
	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}
1484
/*
 * One clockgen compatible per supported SoC; all share clockgen_init(),
 * which picks the matching chipinfo entry at probe time.
 */
CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);