// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <asm/div64.h>
#include <linux/interconnect-provider.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

#include "bcm-voter.h"
#include "icc-rpmh.h"

static LIST_HEAD(bcm_voters);
static DEFINE_MUTEX(bcm_voter_lock);

/**
 * struct bcm_voter - Bus Clock Manager voter
 * @dev: reference to the device that communicates with the BCM
 * @np: reference to the device node to match bcm voters
 * @lock: mutex to protect commit and wake/sleep lists in the voter
 * @commit_list: list containing bcms to be committed to hardware
 * @ws_list: list containing bcms that have different wake/sleep votes
 * @voter_node: list of bcm voters
 */
struct bcm_voter {
	struct device *dev;
	struct device_node *np;
	struct mutex lock;
	struct list_head commit_list;
	struct list_head ws_list;
	struct list_head voter_node;
};

static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
{
	const struct qcom_icc_bcm *bcm_a =
			list_entry(a, struct qcom_icc_bcm, list);
	const struct qcom_icc_bcm *bcm_b =
			list_entry(b, struct qcom_icc_bcm, list);

	if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
		return -1;
	else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
		return 0;
	else
		return 1;
}

static u64 bcm_div(u64 num, u32 base)
{
	/* Ensure that small votes aren't lost. */
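	/*
	 * For example (illustrative numbers only): bcm_div(300, 1000)
	 * returns 1 rather than rounding down to 0, while bcm_div(4000, 1000)
	 * truncates to 4 as a plain do_div() would.
	 */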
	if (num && num < base)
		return 1;

	do_div(num, base);

	return num;
}

static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
	struct qcom_icc_node *node;
	size_t i, bucket;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 temp;

	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
		for (i = 0; i < bcm->num_nodes; i++) {
			node = bcm->nodes[i];
			temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
				       node->buswidth * node->channels);
			agg_avg[bucket] = max(agg_avg[bucket], temp);

			temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
				       node->buswidth);
			agg_peak[bucket] = max(agg_peak[bucket], temp);
		}

		temp = agg_avg[bucket] * 1000ULL;
		bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);

		temp = agg_peak[bucket] * 1000ULL;
		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
	}

	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
	    bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
	}
}
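
/*
 * Worked example for the aggregation above (illustrative numbers only):
 * with node->sum_avg = 600000, bcm->aux_data.width = 8, node->buswidth = 16
 * and node->channels = 2, the per-node contribution is
 * 600000 * 8 / (16 * 2) = 150000. If that is the maximum across the BCM's
 * nodes and bcm->aux_data.unit = 1000, the resulting vote is
 * 150000 * 1000 / 1000 = 150000.
 */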

static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
			       u32 addr, bool commit)
{
	bool valid = true;

	if (!cmd)
		return;

	memset(cmd, 0, sizeof(*cmd));

	if (vote_x == 0 && vote_y == 0)
		valid = false;

	if (vote_x > BCM_TCS_CMD_VOTE_MASK)
		vote_x = BCM_TCS_CMD_VOTE_MASK;

	if (vote_y > BCM_TCS_CMD_VOTE_MASK)
		vote_y = BCM_TCS_CMD_VOTE_MASK;

	cmd->addr = addr;
	cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);

	/*
	 * Set the wait-for-completion flag on commands that need to complete
	 * before the next command is sent.
	 */
	cmd->wait = commit;
}
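
/*
 * Illustrative: for the last BCM in a VCD, tcs_cmd_gen(&cmd, 0, 0, addr, true)
 * yields a command with the valid bit cleared (both votes are zero) but with
 * the commit bit and cmd->wait set, so the VCD boundary is still enforced.
 */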

static void tcs_list_gen(struct list_head *bcm_list, int bucket,
			 struct tcs_cmd tcs_list[MAX_BCMS],
			 int n[MAX_VCD + 1])
{
	struct qcom_icc_bcm *bcm;
	bool commit;
	size_t idx = 0, batch = 0, cur_vcd_size = 0;

	memset(n, 0, sizeof(int) * (MAX_VCD + 1));

	list_for_each_entry(bcm, bcm_list, list) {
		commit = false;
		cur_vcd_size++;
		if ((list_is_last(&bcm->list, bcm_list)) ||
		    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
			commit = true;
			cur_vcd_size = 0;
		}
		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
			    bcm->vote_y[bucket], bcm->addr, commit);
		idx++;
		n[batch]++;
		/*
		 * Batch the BCMs in such a way that we do not split them
		 * across multiple payloads when they are under the same VCD.
		 * This is to ensure that every BCM is committed, since we
		 * only set the commit bit on the last BCM request of every
		 * VCD.
		 */
		if (n[batch] >= MAX_RPMH_PAYLOAD) {
			if (!commit) {
				n[batch] -= cur_vcd_size;
				n[batch + 1] = cur_vcd_size;
			}
			batch++;
		}
	}
}
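
/*
 * Illustrative batching example (assuming MAX_RPMH_PAYLOAD is 16): if the
 * 14th, 15th and 16th commands of batch 0 belong to a VCD that continues
 * past the 16th command, n[0] is trimmed back to 13 and n[1] starts at 3,
 * so the whole VCD group (and its commit bit) ends up in a single payload.
 */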

/**
 * of_bcm_voter_get - gets a bcm voter handle from DT node
 * @dev: device pointer for the consumer device
 * @name: name for the bcm voter device
 *
 * This function will match a device_node pointer for the phandle
 * specified in the device DT and return a bcm_voter handle on success.
 *
 * Returns bcm_voter pointer or ERR_PTR() on error. EPROBE_DEFER is returned
 * when the matching bcm voter is yet to be found.
 */
struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
{
	struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
	struct bcm_voter *temp;
	struct device_node *np, *node;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	if (name) {
		idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	node = of_parse_phandle(np, "qcom,bcm-voters", idx);

	mutex_lock(&bcm_voter_lock);
	list_for_each_entry(temp, &bcm_voters, voter_node) {
		if (temp->np == node) {
			voter = temp;
			break;
		}
	}
	mutex_unlock(&bcm_voter_lock);

	return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
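
/*
 * Usage sketch (illustrative, not part of this file): a provider driver
 * would typically look up its voter in probe, e.g.
 *
 *	voter = of_bcm_voter_get(&pdev->dev, NULL);
 *	if (IS_ERR(voter))
 *		return PTR_ERR(voter);
 *
 * and defer probing until the "qcom,bcm-voter" device has registered.
 */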

/**
 * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
 * @voter: voter that the bcms are being added to
 * @bcm: bcm to add to the commit and wake/sleep lists
 */
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
{
	if (!voter)
		return;

	mutex_lock(&voter->lock);
	if (list_empty(&bcm->list))
		list_add_tail(&bcm->list, &voter->commit_list);

	if (list_empty(&bcm->ws_list))
		list_add_tail(&bcm->ws_list, &voter->ws_list);

	mutex_unlock(&voter->lock);
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);
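
/*
 * Usage sketch (illustrative, names are hypothetical): a provider's set()
 * path would typically queue every BCM touched by the request and then
 * flush the whole lot in one go:
 *
 *	for (i = 0; i < qn->num_bcms; i++)
 *		qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
 *	ret = qcom_icc_bcm_voter_commit(qp->voter);
 */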

/**
 * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
 * @voter: voter that needs flushing
 *
 * This function generates a set of AMC commands and flushes to the BCM device
 * associated with the voter. It conditionally generates WAKE and SLEEP commands
 * based on deltas between WAKE/SLEEP requirements. The ws_list persists
 * through multiple commit requests, and bcm nodes are removed only when the
 * requirements for WAKE match SLEEP.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
{
	struct qcom_icc_bcm *bcm;
	struct qcom_icc_bcm *bcm_tmp;
	int commit_idx[MAX_VCD + 1];
	struct tcs_cmd cmds[MAX_BCMS];
	int ret = 0;

	if (!voter)
		return 0;

	mutex_lock(&voter->lock);
	list_for_each_entry(bcm, &voter->commit_list, list)
		bcm_aggregate(bcm);

	/*
	 * Pre-sort the BCMs based on VCD for ease of generating a command list
	 * that groups the BCMs with the same VCD together. VCDs are numbered
	 * with lowest being the most expensive time wise, ensuring that
	 * those commands are being sent the earliest in the queue. This needs
	 * to be sorted every commit since we can't guarantee the order in which
	 * the BCMs are added to the list.
	 */
	list_sort(NULL, &voter->commit_list, cmp_vcd);

	/*
	 * Construct the command list based on the VCD-ordered list of BCMs
	 * built above.
	 */
	tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);

	if (!commit_idx[0])
		goto out;

	rpmh_invalidate(voter->dev);

	ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
			       cmds, commit_idx);
	if (ret) {
		pr_err("Error sending AMC RPMH requests (%d)\n", ret);
		goto out;
	}

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
		/*
		 * Only generate WAKE and SLEEP commands if a resource's
		 * requirements change as the execution environment transitions
		 * between different power states.
		 */
		if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
		    bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
			list_add_tail(&bcm->list, &voter->commit_list);
		else
			list_del_init(&bcm->ws_list);
	}

	if (list_empty(&voter->commit_list))
		goto out;

	list_sort(NULL, &voter->commit_list, cmp_vcd);

	tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
		goto out;
	}

	tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
		goto out;
	}

out:
	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	mutex_unlock(&voter->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);

static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
	struct bcm_voter *voter;

	voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
	if (!voter)
		return -ENOMEM;

	voter->dev = &pdev->dev;
	voter->np = pdev->dev.of_node;
	mutex_init(&voter->lock);
	INIT_LIST_HEAD(&voter->commit_list);
	INIT_LIST_HEAD(&voter->ws_list);

	mutex_lock(&bcm_voter_lock);
	list_add_tail(&voter->voter_node, &bcm_voters);
	mutex_unlock(&bcm_voter_lock);

	return 0;
}

static const struct of_device_id bcm_voter_of_match[] = {
	{ .compatible = "qcom,bcm-voter" },
	{ }
};

static struct platform_driver qcom_icc_bcm_voter_driver = {
	.probe = qcom_icc_bcm_voter_probe,
	.driver = {
		.name = "bcm_voter",
		.of_match_table = bcm_voter_of_match,
	},
};
module_platform_driver(qcom_icc_bcm_voter_driver);

MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
MODULE_LICENSE("GPL v2");