// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) Protocol based clock driver
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <asm/div64.h>

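/*
 * struct scmi_clk - Driver-private wrapper for a single SCMI-managed clock
 * @id:     clock identifier used in SCMI clock protocol requests
 * @hw:     handle used to register the clock with the common clock framework
 * @info:   clock description (name, discrete rates or rate range) returned
 *          by the firmware
 * @handle: SCMI protocol handle through which clock operations are issued
 */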
struct scmi_clk {
	u32 id;
	struct clk_hw hw;
	const struct scmi_clock_info *info;
	const struct scmi_handle *handle;
};

#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)

static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	int ret;
	u64 rate;
	struct scmi_clk *clk = to_scmi_clk(hw);

	ret = clk->handle->clk_ops->rate_get(clk->handle, clk->id, &rate);
	if (ret)
		return 0;
	return rate;
}

static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	u64 fmin, fmax, ftmp;
	struct scmi_clk *clk = to_scmi_clk(hw);

	/*
	 * We can't figure out what rate it will be, so just return the
	 * rate back to the caller. scmi_clk_recalc_rate() will be called
	 * after the rate is set and we'll know what rate the clock is
	 * running at then.
	 */
	if (clk->info->rate_discrete)
		return rate;

	fmin = clk->info->range.min_rate;
	fmax = clk->info->range.max_rate;
	if (rate <= fmin)
		return fmin;
	else if (rate >= fmax)
		return fmax;

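	/*
	 * Round the requested rate up to the nearest supported step within
	 * [fmin, fmax]. Worked example (illustrative values only): with
	 * min_rate = 100 MHz, step_size = 25 MHz and rate = 160 MHz,
	 * ftmp = 60000000 + 24999999 = 84999999, do_div() leaves 3, and
	 * the returned rate is 3 * 25 MHz + 100 MHz = 175 MHz.
	 */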
	ftmp = rate - fmin;
	ftmp += clk->info->range.step_size - 1; /* to round up */
	do_div(ftmp, clk->info->range.step_size);

	return ftmp * clk->info->range.step_size + fmin;
}

static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return clk->handle->clk_ops->rate_set(clk->handle, clk->id, rate);
}

static int scmi_clk_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return clk->handle->clk_ops->enable(clk->handle, clk->id);
}

static void scmi_clk_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	clk->handle->clk_ops->disable(clk->handle, clk->id);
}

static const struct clk_ops scmi_clk_ops = {
	.recalc_rate = scmi_clk_recalc_rate,
	.round_rate = scmi_clk_round_rate,
	.set_rate = scmi_clk_set_rate,
	/*
	 * We can't provide enable/disable callbacks because SCMI clock
	 * operations may sleep and therefore can't run in atomic context.
	 * Since the clock framework provides the standard clk_prepare_enable()
	 * API for callers that need clk_enable() semantics from non-atomic
	 * context, providing only prepare/unprepare is sufficient.
	 */
	.prepare = scmi_clk_enable,
	.unprepare = scmi_clk_disable,
};
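/*
 * Illustrative consumer usage (the device and connection name below are
 * hypothetical):
 *
 *	clk = devm_clk_get(&pdev->dev, "bus");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_prepare_enable(clk);
 *
 * A bare clk_enable() would not reach the firmware for these clocks since
 * only .prepare/.unprepare are implemented above.
 */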
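/*
 * CLK_GET_RATE_NOCACHE below makes clk_get_rate() re-read the rate from the
 * firmware via .recalc_rate() rather than return a cached value, since the
 * platform may change clock rates without involving the kernel.
 */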
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
{
	int ret;
	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = 0,
		.ops = &scmi_clk_ops,
		.name = sclk->info->name,
	};

	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
	if (!ret)
		clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
				      sclk->info->range.max_rate);
	return ret;
}

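/*
 * Illustrative device tree layout (based on the SCMI binding; the node label
 * and the clock ID in the consumer example are hypothetical):
 *
 *	firmware {
 *		scmi {
 *			...
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;
 *				#clock-cells = <1>;
 *			};
 *		};
 *	};
 *
 * Consumers reference clocks by SCMI clock ID, e.g. clocks = <&scmi_clk 4>,
 * which of_clk_hw_onecell_get() resolves against the hws[] array registered
 * below.
 */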
static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;

	if (!handle || !handle->clk_ops)
		return -ENODEV;

	count = handle->clk_ops->count_get(handle);
	if (count < 0) {
		dev_err(dev, "%pOFn: invalid clock output count\n", np);
		return -EINVAL;
	}

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = count;
	hws = clk_data->hws;

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		sclk->info = handle->clk_ops->info_get(handle, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
			continue;
		}

		sclk->id = idx;
		sclk->handle = handle;

		err = scmi_clk_ops_init(dev, sclk);
		if (err) {
			dev_err(dev, "failed to register clock %d\n", idx);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s\n", sclk->info->name);
			hws[idx] = &sclk->hw;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_clocks_driver = {
	.name = "scmi-clocks",
	.probe = scmi_clocks_probe,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");