/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>

#include "ccu_gate.h"
#include "ccu_nkmp.h"
15
/*
 * Scratch structure for factor selection: for each of the four PLL
 * factors it carries the allowed [min, max] range on input and
 * receives the chosen value (n/k/m/p) on output of
 * ccu_nkmp_find_best().  P is a power-of-two divider (it is stepped
 * by shifting and programmed as ilog2() in the register).
 */
struct _ccu_nkmp {
	unsigned long n, min_n, max_n;
	unsigned long k, min_k, max_k;
	unsigned long m, min_m, max_m;
	unsigned long p, min_p, max_p;
};
22
Jernej Skrabeca5ebc332018-02-14 21:08:56 +010023static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
24 unsigned long n, unsigned long k,
25 unsigned long m, unsigned long p)
26{
27 u64 rate = parent;
28
29 rate *= n * k;
30 do_div(rate, m * p);
31
32 return rate;
33}
34
Maxime Ripard4f728b52016-06-29 21:05:33 +020035static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
36 struct _ccu_nkmp *nkmp)
37{
38 unsigned long best_rate = 0;
39 unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
40 unsigned long _n, _k, _m, _p;
41
Maxime Ripard6e0d50d2016-09-29 22:57:26 +020042 for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
43 for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
44 for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
45 for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
Maxime Ripardee286482016-09-29 22:53:12 +020046 unsigned long tmp_rate;
Maxime Ripard4f728b52016-06-29 21:05:33 +020047
Jernej Skrabeca5ebc332018-02-14 21:08:56 +010048 tmp_rate = ccu_nkmp_calc_rate(parent,
49 _n, _k,
50 _m, _p);
Maxime Ripard4f728b52016-06-29 21:05:33 +020051
Maxime Ripardee286482016-09-29 22:53:12 +020052 if (tmp_rate > rate)
53 continue;
Maxime Ripard4f728b52016-06-29 21:05:33 +020054
Maxime Ripardee286482016-09-29 22:53:12 +020055 if ((rate - tmp_rate) < (rate - best_rate)) {
56 best_rate = tmp_rate;
57 best_n = _n;
58 best_k = _k;
59 best_m = _m;
60 best_p = _p;
61 }
62 }
Maxime Ripard4f728b52016-06-29 21:05:33 +020063 }
64 }
65 }
66
67 nkmp->n = best_n;
68 nkmp->k = best_k;
69 nkmp->m = best_m;
70 nkmp->p = best_p;
71}
72
73static void ccu_nkmp_disable(struct clk_hw *hw)
74{
75 struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
76
77 return ccu_gate_helper_disable(&nkmp->common, nkmp->enable);
78}
79
80static int ccu_nkmp_enable(struct clk_hw *hw)
81{
82 struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
83
84 return ccu_gate_helper_enable(&nkmp->common, nkmp->enable);
85}
86
87static int ccu_nkmp_is_enabled(struct clk_hw *hw)
88{
89 struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
90
91 return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable);
92}
93
94static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
95 unsigned long parent_rate)
96{
97 struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
Icenowy Zhenga910f252018-03-16 22:02:11 +080098 unsigned long n, m, k, p, rate;
Maxime Ripard4f728b52016-06-29 21:05:33 +020099 u32 reg;
100
101 reg = readl(nkmp->common.base + nkmp->common.reg);
102
103 n = reg >> nkmp->n.shift;
104 n &= (1 << nkmp->n.width) - 1;
Maxime Riparde66f81b2016-11-08 18:12:34 +0100105 n += nkmp->n.offset;
106 if (!n)
107 n++;
Maxime Ripard4f728b52016-06-29 21:05:33 +0200108
109 k = reg >> nkmp->k.shift;
110 k &= (1 << nkmp->k.width) - 1;
Maxime Riparde66f81b2016-11-08 18:12:34 +0100111 k += nkmp->k.offset;
112 if (!k)
113 k++;
Maxime Ripard4f728b52016-06-29 21:05:33 +0200114
115 m = reg >> nkmp->m.shift;
116 m &= (1 << nkmp->m.width) - 1;
Maxime Riparde66f81b2016-11-08 18:12:34 +0100117 m += nkmp->m.offset;
118 if (!m)
119 m++;
Maxime Ripard4f728b52016-06-29 21:05:33 +0200120
121 p = reg >> nkmp->p.shift;
122 p &= (1 << nkmp->p.width) - 1;
123
Icenowy Zhenga910f252018-03-16 22:02:11 +0800124 rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
125 if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
126 rate /= nkmp->fixed_post_div;
127
128 return rate;
Maxime Ripard4f728b52016-06-29 21:05:33 +0200129}
130
131static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
132 unsigned long *parent_rate)
133{
134 struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
135 struct _ccu_nkmp _nkmp;
136
Icenowy Zhenga910f252018-03-16 22:02:11 +0800137 if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
138 rate *= nkmp->fixed_post_div;
139
Jernej Skrabeca8e54332018-08-09 18:52:16 +0200140 if (nkmp->max_rate && rate > nkmp->max_rate) {
141 rate = nkmp->max_rate;
142 if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
143 rate /= nkmp->fixed_post_div;
144 return rate;
145 }
146
Chen-Yu Tsai4162c5c2017-03-24 16:33:05 +0800147 _nkmp.min_n = nkmp->n.min ?: 1;
Maxime Ripard0c3c8e12016-10-14 12:08:19 +0200148 _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
Chen-Yu Tsai4162c5c2017-03-24 16:33:05 +0800149 _nkmp.min_k = nkmp->k.min ?: 1;
Maxime Ripard0c3c8e12016-10-14 12:08:19 +0200150 _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
Maxime Ripard6e0d50d2016-09-29 22:57:26 +0200151 _nkmp.min_m = 1;
Maxime Ripard87ba9e52016-09-06 12:29:04 +0200152 _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
Maxime Ripard6e0d50d2016-09-29 22:57:26 +0200153 _nkmp.min_p = 1;
Maxime Ripard87ba9e52016-09-06 12:29:04 +0200154 _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
Maxime Ripard4f728b52016-06-29 21:05:33 +0200155
Maxime Ripard87ba9e52016-09-06 12:29:04 +0200156 ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
Maxime Ripard4f728b52016-06-29 21:05:33 +0200157
Icenowy Zhenga910f252018-03-16 22:02:11 +0800158 rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
Jernej Skrabeca5ebc332018-02-14 21:08:56 +0100159 _nkmp.m, _nkmp.p);
Icenowy Zhenga910f252018-03-16 22:02:11 +0800160 if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
161 rate = rate / nkmp->fixed_post_div;
162
163 return rate;
Maxime Ripard4f728b52016-06-29 21:05:33 +0200164}
165
/*
 * Program the hardware for @rate: pick the best N/K/M/P combination,
 * then read-modify-write the control register under the CCU spinlock
 * and wait for the PLL to (re)lock.  Always returns 0.
 */
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
	u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
	struct _ccu_nkmp _nkmp;
	unsigned long flags;
	u32 reg;

	/* Search in the rate domain seen before the fixed post-divider. */
	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nkmp->fixed_post_div;

	/* A min/max of 0 means "unset": fall back to the field's span. */
	_nkmp.min_n = nkmp->n.min ?: 1;
	_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
	_nkmp.min_k = nkmp->k.min ?: 1;
	_nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
	_nkmp.min_m = 1;
	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
	_nkmp.min_p = 1;
	/* P is stored as log2(P), so the largest divider is 1 << (2^width - 1). */
	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

	ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

	/*
	 * If width is 0, GENMASK() macro may not generate expected mask (0)
	 * as it falls under undefined behaviour by C standard due to shifts
	 * which are equal or greater than width of left operand. This can
	 * be easily avoided by explicitly checking if width is 0.
	 */
	if (nkmp->n.width)
		n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
				 nkmp->n.shift);
	if (nkmp->k.width)
		k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
				 nkmp->k.shift);
	if (nkmp->m.width)
		m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
				 nkmp->m.shift);
	if (nkmp->p.width)
		p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
				 nkmp->p.shift);

	spin_lock_irqsave(nkmp->common.lock, flags);

	/* Read-modify-write: clear all four factor fields, then refill. */
	reg = readl(nkmp->common.base + nkmp->common.reg);
	reg &= ~(n_mask | k_mask | m_mask | p_mask);

	/* N/K/M are programmed offset-adjusted; P is programmed as log2(P). */
	reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
	reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
	reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
	reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

	writel(reg, nkmp->common.base + nkmp->common.reg);

	spin_unlock_irqrestore(nkmp->common.lock, flags);

	/* Block until the PLL reports lock on its status bit. */
	ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

	return 0;
}
226
/* clk_ops for NKMP-style PLL clocks, wired to the helpers above. */
const struct clk_ops ccu_nkmp_ops = {
	.disable	= ccu_nkmp_disable,
	.enable		= ccu_nkmp_enable,
	.is_enabled	= ccu_nkmp_is_enabled,

	.recalc_rate	= ccu_nkmp_recalc_rate,
	.round_rate	= ccu_nkmp_round_rate,
	.set_rate	= ccu_nkmp_set_rate,
};