/*
 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clk/msm-clk-provider.h>
#include <linux/clk/msm-clock-generic.h>
#include <soc/qcom/msm-clock-controller.h>

/* ==================== Mux clock ==================== */

static int mux_parent_to_src_sel(struct mux_clk *mux, struct clk *p)
{
	return parent_to_src_sel(mux->parents, mux->num_parents, p);
}

static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = mux_parent_to_src_sel(mux, p);
	struct clk *old_parent;
	int rc = 0, i;
	unsigned long flags;

	if (sel < 0 && mux->rec_parents) {
		for (i = 0; i < mux->num_rec_parents; i++) {
			rc = clk_set_parent(mux->rec_parents[i], p);
			if (!rc) {
				/*
				 * This is necessary to ensure prepare/enable
				 * counts get propagated correctly.
				 */
				p = mux->rec_parents[i];
				sel = mux_parent_to_src_sel(mux, p);
				break;
			}
		}
	}

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	c->rate = clk_get_rate(p);
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}

static long mux_round_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i;
	unsigned long prate, rrate = 0;

	for (i = 0; i < mux->num_parents; i++) {
		prate = clk_round_rate(mux->parents[i].src, rate);
		if (is_better_rate(rate, rrate, prate))
			rrate = prate;
	}
	if (!rrate)
		return -EINVAL;

	return rrate;
}

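/*
 * Summary of the rate-change sequence implemented below (a description of
 * the existing logic, not new behaviour): find a parent that can produce the
 * requested rate, optionally park the mux on its safe parent/frequency while
 * rates change, set the new parent's rate, then reparent via
 * mux_set_parent(). On failure the new parent's original rate is restored
 * and the mux is programmed back to the current parent.
 */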
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	/*
	 * Check if one of the possible parents is already at the requested
	 * rate.
	 */
	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
		struct clk *p = mux->parents[i].src;

		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}

	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			if (!mux->try_new_parent)
				break;
			if (mux->try_new_parent && new_parent != c->parent)
				break;
		}
	}

	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates. If the mux can switch between distinct sources safely
	 * (indicated by try_new_parent), and the new source is not the current
	 * parent, do not switch to the safe parent.
	 */
	if (mux->safe_sel >= 0 &&
		!(mux->try_new_parent && (new_parent != c->parent))) {
		/*
		 * The safe parent might be a clock with multiple sources;
		 * to select the "safe" source, set a safe frequency.
		 */
		if (mux->safe_freq) {
			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
			if (rc) {
				pr_err("Failed to set safe rate on %s\n",
					clk_name(mux->safe_parent));
				return rc;
			}
		}

		/*
		 * Some mux implementations might switch to/from a low power
		 * parent as part of their disable/enable ops. Grab the
		 * enable lock to avoid racing with these implementations.
		 */
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
		if (rc)
			return rc;

	}

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux,
		mux_parent_to_src_sel(mux, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}

static int mux_enable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops->enable)
		return mux->ops->enable(mux);
	return 0;
}

static void mux_disable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops->disable)
		return mux->ops->disable(mux);
}

static struct clk *mux_get_parent(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = mux->ops->get_mux_sel(mux);
	int i;

	for (i = 0; i < mux->num_parents; i++) {
		if (mux->parents[i].sel == sel)
			return mux->parents[i].src;
	}

	/* Unfamiliar parent. */
	return NULL;
}

static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = mux_parent_to_src_sel(mux, mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

static void __iomem *mux_clk_list_registers(struct clk *c, int n,
			struct clk_register_data **regs, u32 *size)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops && mux->ops->list_registers)
		return mux->ops->list_registers(mux, n, regs, size);

	return ERR_PTR(-EINVAL);
}

const struct clk_ops clk_ops_gen_mux = {
	.enable = mux_enable,
	.disable = mux_disable,
	.set_parent = mux_set_parent,
	.round_rate = mux_round_rate,
	.set_rate = mux_set_rate,
	.handoff = mux_handoff,
	.get_parent = mux_get_parent,
	.list_registers = mux_clk_list_registers,
};

/* ==================== Divider clock ==================== */

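/*
 * __div_round_rate() is the search helper shared by the div and mux_div
 * clocks: for each divider between min_div and max_div it asks the parent to
 * round rate * div / numer and keeps the divider/parent-rate pair that
 * is_better_rate() judges best, stopping early once a higher divider can no
 * longer help or the result is within rate_margin of the request.
 */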
static long __div_round_rate(struct div_data *data, unsigned long rate,
	struct clk *parent, unsigned int *best_div, unsigned long *best_prate)
{
	unsigned int div, min_div, max_div, _best_div = 1;
	unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
	unsigned int numer;

	rate = max(rate, 1UL);

	min_div = max(data->min_div, 1U);
	max_div = min(data->max_div, (unsigned int) (ULONG_MAX));

	/*
	 * div values are doubled for half dividers.
	 * Adjust for that by picking a numer of 2.
	 */
	numer = data->is_half_divider ? 2 : 1;
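	/*
	 * Worked example (assuming a half divider): a request for 100 MHz
	 * with div == 3 models an effective divide-by-1.5, so the parent is
	 * asked for mult_frac(100 MHz, 3, 2) = 150 MHz and the achievable
	 * output is computed back as mult_frac(prate, 2, 3).
	 */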

	for (div = min_div; div <= max_div; div++) {
		if (data->skip_odd_div && (div & 1))
			if (!(data->allow_div_one && (div == 1)))
				continue;
		if (data->skip_even_div && !(div & 1))
			continue;
		req_prate = mult_frac(rate, div, numer);
		prate = clk_round_rate(parent, req_prate);
		if (IS_ERR_VALUE(prate))
			break;

		actual_rate = mult_frac(prate, numer, div);
		if (is_better_rate(rate, rrate, actual_rate)) {
			rrate = actual_rate;
			_best_div = div;
			_best_prate = prate;
		}

		/*
		 * Trying higher dividers is only going to ask the parent for
		 * a higher rate. If it can't even output a rate higher than
		 * the one we request for this divider, the parent is not
		 * going to be able to output an even higher rate required
		 * for a higher divider. So, stop trying higher dividers.
		 */
		if (actual_rate < rate)
			break;

		if (rrate <= rate + data->rate_margin)
			break;
	}

	if (!rrate)
		return -EINVAL;
	if (best_div)
		*best_div = _best_div;
	if (best_prate)
		*best_prate = _best_prate;

	return rrate;
}

static long div_round_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);

	return __div_round_rate(&d->data, rate, c->parent, NULL, NULL);
}

static int _find_safe_div(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	struct div_data *data = &d->data;
	unsigned long fast = max(rate, c->rate);
	unsigned int numer = data->is_half_divider ? 2 : 1;
	int i, safe_div = 0;

	if (!d->safe_freq)
		return 0;

	/* Find the max safe freq that does not exceed 'fast' */
	for (i = data->max_div; i >= data->min_div; i--)
		if (mult_frac(d->safe_freq, numer, i) <= fast)
			safe_div = i;

	return safe_div ?: -EINVAL;
}

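/*
 * Ordering note (restating what div_set_rate() below already does): a larger
 * safe/new divider is programmed before the parent rate is changed, and the
 * divider is only lowered to its final value after the parent has been
 * reprogrammed, so the output frequency stays within a safe range while the
 * parent rate moves.
 */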
static int div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	int safe_div, div, rc = 0;
	long rrate, old_prate, new_prate;
	struct div_data *data = &d->data;

	rrate = __div_round_rate(data, rate, c->parent, &div, &new_prate);
	if (rrate < rate || rrate > rate + data->rate_margin)
		return -EINVAL;

	/*
	 * For a fixed-divider clock we don't want to return an error if the
	 * requested rate matches the achievable rate, so don't check for
	 * !d->ops and bail out. __div_round_rate() ensures div ==
	 * d->data.div if !d->ops.
	 */

	safe_div = _find_safe_div(c, rate);
	if (d->safe_freq && safe_div < 0) {
		pr_err("No safe div on %s for transitioning from %lu to %lu\n",
			c->dbg_name, c->rate, rate);
		return -EINVAL;
	}

	safe_div = max(safe_div, div);

	if (safe_div > data->div) {
		rc = d->ops->set_div(d, safe_div);
		if (rc) {
			pr_err("Failed to set div %d on %s\n", safe_div,
				c->dbg_name);
			return rc;
		}
	}

	old_prate = clk_get_rate(c->parent);
	rc = clk_set_rate(c->parent, new_prate);
	if (rc)
		goto set_rate_fail;

	if (div < data->div)
		rc = d->ops->set_div(d, div);
	else if (div < safe_div)
		rc = d->ops->set_div(d, div);
	if (rc)
		goto div_dec_fail;

	data->div = div;

	return 0;

div_dec_fail:
	WARN(clk_set_rate(c->parent, old_prate),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
set_rate_fail:
	if (safe_div > data->div)
		WARN(d->ops->set_div(d, data->div),
			"Set rate failed for %s. Also in bad state!\n",
			c->dbg_name);
	return rc;
}

static int div_enable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);

	if (d->ops && d->ops->enable)
		return d->ops->enable(d);
	return 0;
}

static void div_disable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);

	if (d->ops && d->ops->disable)
		return d->ops->disable(d);
}

static enum handoff div_handoff(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div = d->data.div;

	if (d->ops && d->ops->get_div)
		div = max(d->ops->get_div(d), 1);
	div = max(div, 1U);
	c->rate = clk_get_rate(c->parent) / div;

	if (!d->ops || !d->ops->set_div)
		d->data.min_div = d->data.max_div = div;
	d->data.div = div;

	if (d->en_mask && d->ops && d->ops->is_enabled)
		return d->ops->is_enabled(d)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

static void __iomem *div_clk_list_registers(struct clk *c, int n,
			struct clk_register_data **regs, u32 *size)
{
	struct div_clk *d = to_div_clk(c);

	if (d->ops && d->ops->list_registers)
		return d->ops->list_registers(d, n, regs, size);

	return ERR_PTR(-EINVAL);
}

const struct clk_ops clk_ops_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = div_round_rate,
	.set_rate = div_set_rate,
	.handoff = div_handoff,
	.list_registers = div_clk_list_registers,
};

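/*
 * "Slave" dividers never touch their parent's rate: __slave_div_round_rate()
 * simply picks DIV_ROUND_CLOSEST(parent rate, requested rate), clamps it to
 * [min_div, max_div] and returns the output rate that divider would give at
 * the parent's current rate.
 */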
static long __slave_div_round_rate(struct clk *c, unsigned long rate,
					int *best_div)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div, min_div, max_div;
	long p_rate;

	rate = max(rate, 1UL);

	min_div = d->data.min_div;
	max_div = d->data.max_div;

	p_rate = clk_get_rate(c->parent);
	div = DIV_ROUND_CLOSEST(p_rate, rate);
	div = max(div, min_div);
	div = min(div, max_div);
	if (best_div)
		*best_div = div;

	return p_rate / div;
}

static long slave_div_round_rate(struct clk *c, unsigned long rate)
{
	return __slave_div_round_rate(c, rate, NULL);
}

static int slave_div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	int div, rc = 0;
	long rrate;

	rrate = __slave_div_round_rate(c, rate, &div);
	if (rrate != rate)
		return -EINVAL;

	if (div == d->data.div)
		return 0;

	/*
	 * For fixed divider clock we don't want to return an error if the
	 * requested rate matches the achievable rate. So, don't check for
	 * !d->ops and return an error. __slave_div_round_rate() ensures
	 * div == d->data.div if !d->ops.
	 */
	rc = d->ops->set_div(d, div);
	if (rc)
		return rc;

	d->data.div = div;

	return 0;
}

static unsigned long slave_div_get_rate(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);

	if (!d->data.div)
		return 0;
	return clk_get_rate(c->parent) / d->data.div;
}

const struct clk_ops clk_ops_slave_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = slave_div_round_rate,
	.set_rate = slave_div_set_rate,
	.get_rate = slave_div_get_rate,
	.handoff = div_handoff,
	.list_registers = div_clk_list_registers,
};

/**
 * External clock
 * Some clock controllers have input clock signals that come from outside the
 * clock controller. Such an input clock signal might then be used as a source
 * for several clocks inside the clock controller. This external clock
 * implementation models the input clock signal by simply passing requests on
 * to the clock's parent, the original external clock source. The driver for
 * the clock controller should clk_get() the original external clock in its
 * probe function and set it as the parent of this external clock.
 */
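/*
 * Illustrative probe-time usage of the pattern described above (a sketch
 * with hypothetical names, not code from this driver):
 *
 *	struct clk *board_xo = clk_get(&pdev->dev, "xo_board");
 *
 *	if (!IS_ERR(board_xo))
 *		clk_set_parent(&my_xo_ext_clk.c, board_xo);
 */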

long parent_round_rate(struct clk *c, unsigned long rate)
{
	return clk_round_rate(c->parent, rate);
}

int parent_set_rate(struct clk *c, unsigned long rate)
{
	return clk_set_rate(c->parent, rate);
}

unsigned long parent_get_rate(struct clk *c)
{
	return clk_get_rate(c->parent);
}

static int ext_set_parent(struct clk *c, struct clk *p)
{
	return clk_set_parent(c->parent, p);
}

static struct clk *ext_get_parent(struct clk *c)
{
	struct ext_clk *ext = to_ext_clk(c);

	if (!IS_ERR_OR_NULL(c->parent))
		return c->parent;
	return clk_get(ext->dev, ext->clk_id);
}

static enum handoff ext_handoff(struct clk *c)
{
	c->rate = clk_get_rate(c->parent);
	/* Similar reasoning applied in div_handoff, see comment there. */
	return HANDOFF_DISABLED_CLK;
}

const struct clk_ops clk_ops_ext = {
	.handoff = ext_handoff,
	.round_rate = parent_round_rate,
	.set_rate = parent_set_rate,
	.get_rate = parent_get_rate,
	.set_parent = ext_set_parent,
	.get_parent = ext_get_parent,
};

static void *ext_clk_dt_parser(struct device *dev, struct device_node *np)
{
	struct ext_clk *ext;
	const char *str;
	int rc;

	ext = devm_kzalloc(dev, sizeof(*ext), GFP_KERNEL);
	if (!ext)
		return ERR_PTR(-ENOMEM);

	ext->dev = dev;
	rc = of_property_read_string(np, "qcom,clock-names", &str);
	if (!rc)
		ext->clk_id = (void *)str;

	ext->c.ops = &clk_ops_ext;
	return msmclk_generic_clk_init(dev, np, &ext->c);
}
MSMCLK_PARSER(ext_clk_dt_parser, "qcom,ext-clk", 0);

/* ==================== Mux_div clock ==================== */

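/*
 * A mux_div clock pairs a source mux with a divider that can be reprogrammed
 * together: __set_src_div() below hands both the source select and the
 * divider value to ops->set_src_div() in a single call ("Set divider and mux
 * src atomically", as the set_rate path puts it).
 */
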
static int mux_div_clk_enable(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (md->ops->enable)
		return md->ops->enable(md);
	return 0;
}

static void mux_div_clk_disable(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (md->ops->disable)
		return md->ops->disable(md);
}

static long __mux_div_round_rate(struct clk *c, unsigned long rate,
	struct clk **best_parent, int *best_div, unsigned long *best_prate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned int i;
	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
	struct clk *_best_parent = 0;

	if (md->try_get_rate) {
		for (i = 0; i < md->num_parents; i++) {
			int divider;
			unsigned long p_rate;

			rrate = __div_round_rate(&md->data, rate,
						md->parents[i].src,
						&divider, &p_rate);
			/*
			 * Check if one of the possible parents is already at
			 * the requested rate.
			 */
			if (p_rate == clk_get_rate(md->parents[i].src)
					&& rrate == rate) {
				best = rrate;
				_best_div = divider;
				_best_prate = p_rate;
				_best_parent = md->parents[i].src;
				goto end;
			}
		}
	}

	for (i = 0; i < md->num_parents; i++) {
		int div;
		unsigned long prate;

		rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
						&div, &prate);

		if (is_better_rate(rate, best, rrate)) {
			best = rrate;
			_best_div = div;
			_best_prate = prate;
			_best_parent = md->parents[i].src;
		}

		if (rate <= rrate && rrate <= rate + md->data.rate_margin)
			break;
	}
end:
	if (best_div)
		*best_div = _best_div;
	if (best_prate)
		*best_prate = _best_prate;
	if (best_parent)
		*best_parent = _best_parent;

	if (best)
		return best;
	return -EINVAL;
}

static long mux_div_clk_round_rate(struct clk *c, unsigned long rate)
{
	return __mux_div_round_rate(c, rate, NULL, NULL, NULL);
}

/* requires enable lock to be held */
static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
{
	u32 rc = 0, src_sel;

	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
	/*
	 * If the clock is disabled, don't change to the new settings until
	 * the clock is re-enabled.
	 */
	if (md->c.count)
		rc = md->ops->set_src_div(md, src_sel, div);
	if (!rc) {
		md->data.div = div;
		md->src_sel = src_sel;
	}

	return rc;
}

static int set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
{
	unsigned long flags;
	u32 rc;

	spin_lock_irqsave(&md->c.lock, flags);
	rc = __set_src_div(md, parent, div);
	spin_unlock_irqrestore(&md->c.lock, flags);

	return rc;
}

/* Must be called after handoff to ensure parent clock rates are initialized */
static int safe_parent_init_once(struct clk *c)
{
	unsigned long rrate;
	u32 best_div;
	struct clk *best_parent;
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (IS_ERR(md->safe_parent))
		return -EINVAL;
	if (!md->safe_freq || md->safe_parent)
		return 0;

	rrate = __mux_div_round_rate(c, md->safe_freq, &best_parent,
			&best_div, NULL);

	if (rrate == md->safe_freq) {
		md->safe_div = best_div;
		md->safe_parent = best_parent;
	} else {
		md->safe_parent = ERR_PTR(-EINVAL);
		return -EINVAL;
	}
	return 0;
}

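/*
 * mux_div_clk_set_rate() sequence (a restatement of the code below, not new
 * behaviour): park on the safe source/divider if one is configured (or
 * pre-apply a larger divider when staying on the same parent), change the
 * new parent's rate, then switch source and divider together under the
 * enable lock. The error paths unwind in reverse, restoring the new parent's
 * rate and the original source/divider.
 */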
static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long flags, rrate;
	unsigned long new_prate, new_parent_orig_rate;
	struct clk *old_parent, *new_parent;
	u32 new_div, old_div;
	int rc;

	rc = safe_parent_init_once(c);
	if (rc)
		return rc;

	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
							&new_prate);
	if (rrate < rate || rrate > rate + md->data.rate_margin)
		return -EINVAL;

	old_parent = c->parent;
	old_div = md->data.div;

	/* Refer to the description of safe_freq in clock-generic.h */
	if (md->safe_freq)
		rc = set_src_div(md, md->safe_parent, md->safe_div);

	else if (new_parent == old_parent && new_div >= old_div) {
		/*
		 * If both the parent_rate and divider changes, there may be an
		 * intermediate frequency generated. Ensure this intermediate
		 * frequency is less than both the new rate and previous rate.
		 */
		rc = set_src_div(md, old_parent, new_div);
	}
	if (rc)
		return rc;

	new_parent_orig_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, new_prate);
	if (rc) {
		pr_err("failed to set %s to %ld\n",
			clk_name(new_parent), new_prate);
		goto err_set_rate;
	}

	rc = __clk_pre_reparent(c, new_parent, &flags);
	if (rc)
		goto err_pre_reparent;

	/* Set divider and mux src atomically */
	rc = __set_src_div(md, new_parent, new_div);
	if (rc)
		goto err_set_src_div;

	c->parent = new_parent;

	__clk_post_reparent(c, old_parent, &flags);
	return 0;

err_set_src_div:
	/* Not switching to new_parent, so disable it */
	__clk_post_reparent(c, new_parent, &flags);
err_pre_reparent:
	rc = clk_set_rate(new_parent, new_parent_orig_rate);
	WARN(rc, "%s: error changing new_parent (%s) rate back to %ld\n",
		clk_name(c), clk_name(new_parent), new_parent_orig_rate);
err_set_rate:
	rc = set_src_div(md, old_parent, old_div);
	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
		clk_name(c), old_div, clk_name(old_parent));

	return rc;
}

static struct clk *mux_div_clk_get_parent(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	u32 i, div, src_sel;

	md->ops->get_src_div(md, &src_sel, &div);

	md->data.div = div;
	md->src_sel = src_sel;

	for (i = 0; i < md->num_parents; i++) {
		if (md->parents[i].sel == src_sel)
			return md->parents[i].src;
	}

	return NULL;
}

static enum handoff mux_div_clk_handoff(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long parent_rate;
	unsigned int numer;

	parent_rate = clk_get_rate(c->parent);
	/*
	 * div values are doubled for half dividers.
	 * Adjust for that by picking a numer of 2.
	 */
	numer = md->data.is_half_divider ? 2 : 1;

	if (md->data.div) {
		c->rate = mult_frac(parent_rate, numer, md->data.div);
	} else {
		c->rate = 0;
		return HANDOFF_DISABLED_CLK;
	}

	if (md->en_mask && md->ops && md->ops->is_enabled)
		return md->ops->is_enabled(md)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

static void __iomem *mux_div_clk_list_registers(struct clk *c, int n,
			struct clk_register_data **regs, u32 *size)
{
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (md->ops && md->ops->list_registers)
		return md->ops->list_registers(md, n, regs, size);

	return ERR_PTR(-EINVAL);
}

const struct clk_ops clk_ops_mux_div_clk = {
	.enable = mux_div_clk_enable,
	.disable = mux_div_clk_disable,
	.set_rate = mux_div_clk_set_rate,
	.round_rate = mux_div_clk_round_rate,
	.get_parent = mux_div_clk_get_parent,
	.handoff = mux_div_clk_handoff,
	.list_registers = mux_div_clk_list_registers,
};