blob: 1d1d7e74e942f6aab7dc63b27d84c7e23326f2c6 [file] [log] [blame]
Raghavendra Kakarlafbcacdf2018-03-09 18:27:33 +05301/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Maulik Shah296ddb12017-07-03 12:25:54 +053013
14#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
15
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -070016#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/of.h>
20#include <linux/err.h>
21#include <linux/sysfs.h>
22#include <linux/device.h>
23#include <linux/platform_device.h>
24#include <linux/moduleparam.h>
25#include "lpm-levels.h"
26
/* Sysfs attribute categories exposed for each low power mode level. */
enum lpm_type {
	IDLE = 0,	/* "idle_enabled" read/write knob */
	SUSPEND,	/* "suspend_enabled" read/write knob */
	LATENCY,	/* "latency_us" read-only value */
	LPM_TYPE_NR
};

/* Pairs an lpm_type with its sysfs attribute name. */
struct lpm_type_str {
	enum lpm_type type;
	char *str;
};

static const struct lpm_type_str lpm_types[] = {
	{IDLE, "idle_enabled"},
	{SUSPEND, "suspend_enabled"},
	{LATENCY, "latency_us"},
};

/* Per-cpu arrays with one entry per cpu low power level
 * (allocated in parse_cpu(), filled by set_optimum_cpu_residency()). */
static DEFINE_PER_CPU(uint32_t *, max_residency);
static DEFINE_PER_CPU(uint32_t *, min_residency);
/* Per-cpu sysfs availability state, indexed by logical cpu number. */
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
/* Platform device backing the devm allocations; set in lpm_of_parse_cluster(). */
static struct platform_device *lpm_pdev;
49
50static void *get_enabled_ptr(struct kobj_attribute *attr,
51 struct lpm_level_avail *avail)
52{
53 void *arg = NULL;
54
55 if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
56 arg = (void *) &avail->idle_enabled;
57 else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
58 arg = (void *) &avail->suspend_enabled;
59
60 return arg;
61}
62
63static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
64 struct kobj_attribute *attr)
65{
66 struct lpm_level_avail *avail = NULL;
67
68 if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
69 avail = container_of(attr, struct lpm_level_avail,
70 idle_enabled_attr);
71 else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
72 avail = container_of(attr, struct lpm_level_avail,
73 suspend_enabled_attr);
Mahesh Sivasubramanian62b3a6b2017-05-19 15:33:11 -060074 else if (!strcmp(attr->attr.name, lpm_types[LATENCY].str))
75 avail = container_of(attr, struct lpm_level_avail,
76 latency_attr);
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -070077
78 return avail;
79}
80
/*
 * Recompute the usable residency window [min, max] of every idle level of
 * @cpu for logical cpu @cpu_id, writing into the per-cpu max_residency /
 * min_residency arrays. When @probe_time is true every level is treated
 * as allowed; otherwise availability comes from the sysfs enable knobs
 * via lpm_cpu_mode_allow().
 */
static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
		bool probe_time)
{
	int i, j;
	bool mode_avail;
	uint32_t *maximum_residency = per_cpu(max_residency, cpu_id);
	uint32_t *minimum_residency = per_cpu(min_residency, cpu_id);

	for (i = 0; i < cpu->nlevels; i++) {
		struct power_params *pwr = &cpu->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cpu_mode_allow(cpu_id, i, true);

		/* A disabled level gets a zero-width window. */
		if (!mode_avail) {
			maximum_residency[i] = 0;
			minimum_residency[i] = 0;
			continue;
		}

		/* Max residency: smallest non-zero break-even residency of
		 * any allowed deeper level; ~0 (no cap) if none exists. */
		maximum_residency[i] = ~0;
		for (j = i + 1; j < cpu->nlevels; j++) {
			mode_avail = probe_time ||
					lpm_cpu_mode_allow(cpu_id, j, true);

			if (mode_avail &&
					(maximum_residency[i] > pwr->residencies[j]) &&
					(pwr->residencies[j] != 0))
				maximum_residency[i] = pwr->residencies[j];
		}

		/* Min residency: one past the max residency of the nearest
		 * allowed shallower level, else this level's own overhead.
		 * Relies on maximum_residency[j] (j < i) computed above. */
		minimum_residency[i] = pwr->time_overhead_us;
		for (j = i-1; j >= 0; j--) {
			if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) {
				minimum_residency[i] = maximum_residency[j] + 1;
				break;
			}
		}
	}
}
121
/*
 * Cluster-level counterpart of set_optimum_cpu_residency(): recompute
 * min_residency/max_residency of every level of @cluster. When
 * @probe_time is true every level is treated as allowed; otherwise
 * availability comes from lpm_cluster_mode_allow().
 */
static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
		bool probe_time)
{
	int i, j;
	bool mode_avail;

	for (i = 0; i < cluster->nlevels; i++) {
		struct power_params *pwr = &cluster->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cluster_mode_allow(cluster, i,
					true);

		/* A disabled level gets a zero-width window. */
		if (!mode_avail) {
			pwr->max_residency = 0;
			pwr->min_residency = 0;
			continue;
		}

		/* Max residency: smallest non-zero break-even residency of
		 * any allowed deeper level; ~0 (no cap) if none exists. */
		pwr->max_residency = ~0;
		for (j = i+1; j < cluster->nlevels; j++) {
			mode_avail = probe_time ||
					lpm_cluster_mode_allow(cluster, j,
							true);
			if (mode_avail &&
				(pwr->max_residency > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				pwr->max_residency = pwr->residencies[j];
		}

		/* Min residency: one past the max residency of the nearest
		 * allowed shallower level, else this level's own overhead. */
		pwr->min_residency = pwr->time_overhead_us;
		for (j = i-1; j >= 0; j--) {
			if (probe_time ||
				lpm_cluster_mode_allow(cluster, j, true)) {
				pwr->min_residency =
					cluster->levels[j].pwr.max_residency + 1;
				break;
			}
		}
	}
}
163
164uint32_t *get_per_cpu_max_residency(int cpu)
165{
166 return per_cpu(max_residency, cpu);
167}
168
169uint32_t *get_per_cpu_min_residency(int cpu)
170{
171 return per_cpu(min_residency, cpu);
172}
Mahesh Sivasubramanian62b3a6b2017-05-19 15:33:11 -0600173
174static ssize_t lpm_latency_show(struct kobject *kobj,
175 struct kobj_attribute *attr, char *buf)
176{
177 int ret = 0;
178 struct kernel_param kp;
179 struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);
180
Raghavendra Kakarlafbcacdf2018-03-09 18:27:33 +0530181 if (WARN_ON(!avail))
182 return -EINVAL;
Mahesh Sivasubramanian62b3a6b2017-05-19 15:33:11 -0600183
184 kp.arg = &avail->latency_us;
185
186 ret = param_get_uint(buf, &kp);
187 if (ret > 0) {
188 strlcat(buf, "\n", PAGE_SIZE);
189 ret++;
190 }
191
192 return ret;
193}
194
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700195ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
196 char *buf)
197{
198 int ret = 0;
199 struct kernel_param kp;
Raghavendra Kakarlafbcacdf2018-03-09 18:27:33 +0530200 struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700201
Raghavendra Kakarlafbcacdf2018-03-09 18:27:33 +0530202 if (WARN_ON(!avail))
203 return -EINVAL;
204
205 kp.arg = get_enabled_ptr(attr, avail);
206 if (WARN_ON(!kp.arg))
207 return -EINVAL;
208
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700209 ret = param_get_bool(buf, &kp);
210 if (ret > 0) {
211 strlcat(buf, "\n", PAGE_SIZE);
212 ret++;
213 }
214
215 return ret;
216}
217
218ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
219 const char *buf, size_t len)
220{
221 int ret = 0;
222 struct kernel_param kp;
223 struct lpm_level_avail *avail;
224
225 avail = get_avail_ptr(kobj, attr);
226 if (WARN_ON(!avail))
227 return -EINVAL;
Maulik Shah296ddb12017-07-03 12:25:54 +0530228
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700229 kp.arg = get_enabled_ptr(attr, avail);
230 ret = param_set_bool(buf, &kp);
231
232 if (avail->cpu_node)
233 set_optimum_cpu_residency(avail->data, avail->idx, false);
234 else
235 set_optimum_cluster_residency(avail->data, false);
236
237 return ret ? ret : len;
238}
239
240static int create_lvl_avail_nodes(const char *name,
241 struct kobject *parent, struct lpm_level_avail *avail,
242 void *data, int index, bool cpu_node)
243{
244 struct attribute_group *attr_group = NULL;
245 struct attribute **attr = NULL;
246 struct kobject *kobj = NULL;
247 int ret = 0;
248
249 kobj = kobject_create_and_add(name, parent);
250 if (!kobj)
251 return -ENOMEM;
252
253 attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
254 GFP_KERNEL);
255 if (!attr_group) {
256 ret = -ENOMEM;
257 goto failed;
258 }
259
260 attr = devm_kzalloc(&lpm_pdev->dev,
261 sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
262 if (!attr) {
263 ret = -ENOMEM;
264 goto failed;
265 }
266
267 sysfs_attr_init(&avail->idle_enabled_attr.attr);
268 avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
269 avail->idle_enabled_attr.attr.mode = 0644;
270 avail->idle_enabled_attr.show = lpm_enable_show;
271 avail->idle_enabled_attr.store = lpm_enable_store;
272
273 sysfs_attr_init(&avail->suspend_enabled_attr.attr);
274 avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
275 avail->suspend_enabled_attr.attr.mode = 0644;
276 avail->suspend_enabled_attr.show = lpm_enable_show;
277 avail->suspend_enabled_attr.store = lpm_enable_store;
278
Mahesh Sivasubramanian62b3a6b2017-05-19 15:33:11 -0600279 sysfs_attr_init(&avail->latency_attr.attr);
280 avail->latency_attr.attr.name = lpm_types[LATENCY].str;
281 avail->latency_attr.attr.mode = 0444;
282 avail->latency_attr.show = lpm_latency_show;
283 avail->latency_attr.store = NULL;
284
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700285 attr[0] = &avail->idle_enabled_attr.attr;
286 attr[1] = &avail->suspend_enabled_attr.attr;
Mahesh Sivasubramanian62b3a6b2017-05-19 15:33:11 -0600287 attr[2] = &avail->latency_attr.attr;
288 attr[3] = NULL;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700289 attr_group->attrs = attr;
290
291 ret = sysfs_create_group(kobj, attr_group);
292 if (ret) {
293 ret = -ENOMEM;
294 goto failed;
295 }
296
297 avail->idle_enabled = true;
298 avail->suspend_enabled = true;
299 avail->kobj = kobj;
300 avail->data = data;
301 avail->idx = index;
302 avail->cpu_node = cpu_node;
303
304 return ret;
305
306failed:
307 kobject_put(kobj);
308 return ret;
309}
310
311static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
312{
313 int cpu;
314 int i, cpu_idx;
315 struct kobject **cpu_kobj = NULL;
316 struct lpm_level_avail *level_list = NULL;
317 char cpu_name[20] = {0};
318 int ret = 0;
Mahesh Sivasubramanian168922d2017-06-09 09:47:52 -0600319 struct list_head *pos;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700320
321 cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
322 cpumask_weight(&p->child_cpus), GFP_KERNEL);
323 if (!cpu_kobj)
324 return -ENOMEM;
325
326 cpu_idx = 0;
Mahesh Sivasubramanian168922d2017-06-09 09:47:52 -0600327 list_for_each(pos, &p->cpu) {
328 struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list);
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700329
Mahesh Sivasubramanian168922d2017-06-09 09:47:52 -0600330 for_each_cpu(cpu, &lpm_cpu->related_cpus) {
331 snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
332 cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
333 parent);
334 if (!cpu_kobj[cpu_idx]) {
335 ret = -ENOMEM;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700336 goto release_kobj;
Mahesh Sivasubramanian168922d2017-06-09 09:47:52 -0600337 }
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700338
Mahesh Sivasubramanian168922d2017-06-09 09:47:52 -0600339 level_list = devm_kzalloc(&lpm_pdev->dev,
340 lpm_cpu->nlevels * sizeof(*level_list),
341 GFP_KERNEL);
342 if (!level_list) {
343 ret = -ENOMEM;
344 goto release_kobj;
345 }
346
347 /*
348 * Skip enable/disable for WFI. cpuidle expects WFI to
349 * be available at all times.
350 */
351 for (i = 1; i < lpm_cpu->nlevels; i++) {
352 level_list[i].latency_us =
353 p->levels[i].pwr.latency_us;
354 ret = create_lvl_avail_nodes(
355 lpm_cpu->levels[i].name,
356 cpu_kobj[cpu_idx],
357 &level_list[i],
358 (void *)lpm_cpu, cpu, true);
359 if (ret)
360 goto release_kobj;
361 }
362
363 cpu_level_available[cpu] = level_list;
364 cpu_idx++;
365 }
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700366 }
367
368 return ret;
369
370release_kobj:
371 for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
372 kobject_put(cpu_kobj[i]);
373
374 return ret;
375}
376
/*
 * Recursively build the sysfs hierarchy for cluster @p under @kobj:
 * one kobject per cluster with per-level availability nodes, then the
 * child clusters, and finally per-cpu nodes when this cluster directly
 * contains cpus. Returns 0 on success or a negative errno.
 */
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
{
	int ret = 0;
	struct lpm_cluster *child = NULL;
	int i;
	struct kobject *cluster_kobj = NULL;

	if (!p)
		return -ENODEV;

	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
	if (!cluster_kobj)
		return -ENOMEM;

	for (i = 0; i < p->nlevels; i++) {
		/* Expose the DT latency through the read-only latency_us node. */
		p->levels[i].available.latency_us = p->levels[i].pwr.latency_us;
		ret = create_lvl_avail_nodes(p->levels[i].level_name,
				cluster_kobj, &p->levels[i].available,
				(void *)p, 0, false);
		if (ret)
			return ret;
	}

	list_for_each_entry(child, &p->child, list) {
		ret = create_cluster_lvl_nodes(child, cluster_kobj);
		if (ret)
			return ret;
	}

	if (!list_empty(&p->cpu)) {
		ret = create_cpu_lvl_nodes(p, cluster_kobj);
		if (ret)
			return ret;
	}

	return ret;
}
414
415bool lpm_cpu_mode_allow(unsigned int cpu,
416 unsigned int index, bool from_idle)
417{
418 struct lpm_level_avail *avail = cpu_level_available[cpu];
419
Srinivas Rao Lf2a21e72017-10-13 23:41:58 +0530420 if (lpm_pdev && !index)
421 return 1;
422
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700423 if (!lpm_pdev || !avail)
424 return !from_idle;
425
426 return !!(from_idle ? avail[index].idle_enabled :
427 avail[index].suspend_enabled);
428}
429
430bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
431 unsigned int mode, bool from_idle)
432{
433 struct lpm_level_avail *avail = &cluster->levels[mode].available;
434
435 if (!lpm_pdev || !avail)
436 return false;
437
438 return !!(from_idle ? avail->idle_enabled :
439 avail->suspend_enabled);
440}
441
/*
 * Read the mandatory cluster properties (label, psci mode shift/mask)
 * and the optional prediction tuning knobs from DT @node into @c.
 * Returns 0 on success, negative errno when a mandatory key is missing.
 */
static int parse_cluster_params(struct device_node *node,
		struct lpm_cluster *c)
{
	char *key;
	int ret;

	key = "label";
	ret = of_property_read_string(node, key, &c->cluster_name);
	if (ret)
		goto fail;

	key = "qcom,psci-mode-shift";
	ret = of_property_read_u32(node, key, &c->psci_mode_shift);
	if (ret)
		goto fail;

	key = "qcom,psci-mode-mask";
	ret = of_property_read_u32(node, key, &c->psci_mode_mask);
	if (ret)
		goto fail;

	/* Prediction is on unless DT explicitly disables it. */
	key = "qcom,disable-prediction";
	c->lpm_prediction = !(of_property_read_bool(node, key));

	if (c->lpm_prediction) {
		/* Missing or out-of-range value falls back to the default;
		 * a read failure here is not propagated as an error. */
		key = "qcom,clstr-tmr-add";
		ret = of_property_read_u32(node, key, &c->tmr_add);
		if (ret || c->tmr_add < TIMER_ADD_LOW ||
					c->tmr_add > TIMER_ADD_HIGH)
			c->tmr_add = DEFAULT_TIMER_ADD;
	}

	/* Set default_level to 0 as default */
	c->default_level = 0;

	return 0;
fail:
	pr_err("Failed to read key: %s ret: %d\n", key, ret);

	return ret;
}
483
484static int parse_power_params(struct device_node *node,
485 struct power_params *pwr)
486{
487 char *key;
488 int ret;
489
490 key = "qcom,latency-us";
491 ret = of_property_read_u32(node, key, &pwr->latency_us);
492 if (ret)
493 goto fail;
494
495 key = "qcom,ss-power";
496 ret = of_property_read_u32(node, key, &pwr->ss_power);
497 if (ret)
498 goto fail;
499
500 key = "qcom,energy-overhead";
501 ret = of_property_read_u32(node, key, &pwr->energy_overhead);
502 if (ret)
503 goto fail;
504
505 key = "qcom,time-overhead";
506 ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
507 if (ret)
508 goto fail;
509
Maulik Shah296ddb12017-07-03 12:25:54 +0530510 return ret;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700511fail:
Maulik Shah296ddb12017-07-03 12:25:54 +0530512 pr_err("Failed to read key: %s node: %s\n", key, node->name);
513
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700514 return ret;
515}
516
/*
 * Parse one qcom,pm-cluster-level child node into the next free slot of
 * cluster->levels[], bumping cluster->nlevels on success.
 * Returns 0 on success, negative errno on a malformed node.
 */
static int parse_cluster_level(struct device_node *node,
		struct lpm_cluster *cluster)
{
	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
	int ret = -ENOMEM;
	char *key;

	key = "label";
	ret = of_property_read_string(node, key, &level->level_name);
	if (ret)
		goto failed;

	key = "qcom,psci-mode";
	ret = of_property_read_u32(node, key, &level->psci_id);
	if (ret)
		goto failed;

	level->is_reset = of_property_read_bool(node, "qcom,is-reset");

	/* The default (shallowest) level carries no child constraint. */
	if (cluster->nlevels != cluster->default_level) {
		key = "qcom,min-child-idx";
		ret = of_property_read_u32(node, key, &level->min_child_level);
		if (ret)
			goto failed;

		/* Track the loosest child constraint across all levels. */
		if (cluster->min_child_level > level->min_child_level)
			cluster->min_child_level = level->min_child_level;
	}

	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");

	/* key doubles as the label used in the failure message below. */
	key = "parse_power_params";
	ret = parse_power_params(node, &level->pwr);
	if (ret)
		goto failed;

	/* A missing reset-level means this level resets nothing. */
	key = "qcom,reset-level";
	ret = of_property_read_u32(node, key, &level->reset_level);
	if (ret == -EINVAL)
		level->reset_level = LPM_RESET_LVL_NONE;
	else if (ret)
		goto failed;

	cluster->nlevels++;

	return 0;
failed:
	pr_err("Failed to read key: %s ret: %d\n", key, ret);

	return ret;
}
568
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700569static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
570{
571 char *key;
572 int ret;
573
Maulik Shah296ddb12017-07-03 12:25:54 +0530574 key = "label";
575 ret = of_property_read_string(n, key, &l->name);
576 if (ret)
577 goto fail;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700578
Mahesh Sivasubramanian060f60c2017-06-02 16:55:51 -0600579 key = "qcom,psci-cpu-mode";
580 ret = of_property_read_u32(n, key, &l->psci_id);
Maulik Shah296ddb12017-07-03 12:25:54 +0530581 if (ret)
582 goto fail;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700583
Maulik Shah296ddb12017-07-03 12:25:54 +0530584 return ret;
585fail:
586 pr_err("Failed to read key: %s level: %s\n", key, l->name);
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700587
Maulik Shah296ddb12017-07-03 12:25:54 +0530588 return ret;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700589}
590
/*
 * Build the cpumask described by @node's "qcom,cpu" phandle list into
 * @mask. When the property is absent, every possible cpu is assumed to
 * belong to this cluster. Always returns 0.
 */
static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
{
	struct device_node *cpu_node;
	int cpu;
	int idx = 0;

	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	if (!cpu_node) {
		pr_info("%s: No CPU phandle, assuming single cluster\n",
				node->full_name);
		/*
		 * Not all targets have the cpu node populated in the device
		 * tree. If cpu node is not populated assume all possible
		 * nodes belong to this cluster
		 */
		cpumask_copy(mask, cpu_possible_mask);
		return 0;
	}

	while (cpu_node) {
		/* Find the logical cpu whose DT node matches this phandle.
		 * NOTE(review): of_get_cpu_node() returns a node with an
		 * elevated refcount that is never dropped here — confirm
		 * whether that leak is acceptable for probe-time code. */
		for_each_possible_cpu(cpu) {
			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
				cpumask_set_cpu(cpu, mask);
				break;
			}
		}
		/* Drop the reference taken by of_parse_phandle(). */
		of_node_put(cpu_node);
		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	}

	return 0;
}
623
624static int calculate_residency(struct power_params *base_pwr,
625 struct power_params *next_pwr)
626{
627 int32_t residency = (int32_t)(next_pwr->energy_overhead -
628 base_pwr->energy_overhead) -
629 ((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
630 - (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
631
632 residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
633
634 if (residency < 0) {
Maulik Shah296ddb12017-07-03 12:25:54 +0530635 pr_err("Residency < 0 for LPM\n");
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700636 return next_pwr->time_overhead_us;
637 }
638
639 return residency < next_pwr->time_overhead_us ?
640 next_pwr->time_overhead_us : residency;
641}
642
/*
 * Parse every child of the qcom,pm-cpu @node into cpu->levels[], then
 * derive the cross-level break-even residencies and allocate/populate
 * the per-cpu min/max residency arrays for each related cpu.
 * Returns 0 on success, negative errno on failure.
 */
static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
{

	struct device_node *n;
	int ret, i, j;
	const char *key;

	for_each_child_of_node(node, n) {
		struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];

		cpu->nlevels++;

		/* Early returns must drop the iterator's reference on n. */
		ret = parse_cpu_mode(n, l);
		if (ret) {
			of_node_put(n);
			return ret;
		}

		ret = parse_power_params(n, &l->pwr);
		if (ret) {
			of_node_put(n);
			return ret;
		}
		key = "qcom,use-broadcast-timer";
		l->use_bc_timer = of_property_read_bool(n, key);

		key = "qcom,is-reset";
		l->is_reset = of_property_read_bool(n, key);

		/* Missing reset-level means this level resets nothing. */
		key = "qcom,reset-level";
		ret = of_property_read_u32(n, key, &l->reset_level);
		if (ret == -EINVAL)
			l->reset_level = LPM_RESET_LVL_NONE;
		else if (ret)
			return ret;
		/*
		 * NOTE(review): for_each_child_of_node() itself drops the
		 * previous child's reference when advancing, so this put on
		 * the successful-iteration path looks like a double
		 * of_node_put (and the `else if (ret)` return above does
		 * not put) — confirm against the iterator's refcounting.
		 */
		of_node_put(n);
	}

	/* residencies[j] = break-even time for moving from level i to j;
	 * zero for j <= i (never "demote" to a shallower level). */
	for (i = 0; i < cpu->nlevels; i++) {
		for (j = 0; j < cpu->nlevels; j++) {
			if (i >= j) {
				cpu->levels[i].pwr.residencies[j] = 0;
				continue;
			}

			cpu->levels[i].pwr.residencies[j] =
				calculate_residency(&cpu->levels[i].pwr,
					&cpu->levels[j].pwr);

			pr_info("idx %d %u\n", j,
					cpu->levels[i].pwr.residencies[j]);
		}
	}

	/* Per-cpu residency arrays, one slot per level; devm-managed. */
	for_each_cpu(i, &cpu->related_cpus) {

		per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev,
				sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
		if (!per_cpu(max_residency, i))
			return -ENOMEM;

		per_cpu(min_residency, i) = devm_kzalloc(&lpm_pdev->dev,
				sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
		if (!per_cpu(min_residency, i))
			return -ENOMEM;

		set_optimum_cpu_residency(cpu, i, true);
	}

	return 0;
}
713
/*
 * Allocate a struct lpm_cpu for a qcom,pm-cpu DT node, read its psci
 * and prediction parameters, parse its idle levels and link it into
 * parent cluster @c. Returns 0 on success, negative errno on failure.
 * Allocations are devm-managed, so failure paths do not free.
 */
static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
	int ret;
	char *key;
	struct lpm_cpu *cpu;

	cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	if (get_cpumask_for_node(node, &cpu->related_cpus))
		return -EINVAL;

	cpu->parent = c;

	key = "qcom,psci-mode-shift";
	ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
	if (ret)
		goto failed;

	key = "qcom,psci-mode-mask";
	ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
	if (ret)
		goto failed;

	/* Prediction is enabled unless DT opts out. */
	key = "qcom,disable-prediction";
	cpu->lpm_prediction = !(of_property_read_bool(node, key));

	if (cpu->lpm_prediction) {
		/* Each tuning knob falls back to its default when missing
		 * or outside its valid range; read errors here do not
		 * propagate. */
		key = "qcom,ref-stddev";
		ret = of_property_read_u32(node, key, &cpu->ref_stddev);
		if (ret || cpu->ref_stddev < STDDEV_LOW ||
					cpu->ref_stddev > STDDEV_HIGH)
			cpu->ref_stddev = DEFAULT_STDDEV;

		key = "qcom,tmr-add";
		ret = of_property_read_u32(node, key, &cpu->tmr_add);
		if (ret || cpu->tmr_add < TIMER_ADD_LOW ||
					cpu->tmr_add > TIMER_ADD_HIGH)
			cpu->tmr_add = DEFAULT_TIMER_ADD;

		key = "qcom,ref-premature-cnt";
		ret = of_property_read_u32(node, key, &cpu->ref_premature_cnt);
		if (ret || cpu->ref_premature_cnt < PREMATURE_CNT_LOW ||
				cpu->ref_premature_cnt > PREMATURE_CNT_HIGH)
			cpu->ref_premature_cnt = DEFAULT_PREMATURE_CNT;
	}

	/* key doubles as the label used in the failure message below. */
	key = "parse_cpu";
	ret = parse_cpu(node, cpu);
	if (ret)
		goto failed;

	cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
	list_add(&cpu->list, &c->cpu);

	return ret;

failed:
	pr_err("Failed to read key: %s node: %s\n", key, node->name);
	return ret;
}
776
777void free_cluster_node(struct lpm_cluster *cluster)
778{
Mahesh Sivasubramanian168922d2017-06-09 09:47:52 -0600779 struct lpm_cpu *cpu, *n;
Mahesh Sivasubramanianf11913b2017-11-28 10:06:17 -0700780 struct lpm_cluster *cl, *m;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700781
Mahesh Sivasubramanianf11913b2017-11-28 10:06:17 -0700782 list_for_each_entry_safe(cl, m, &cluster->child, list) {
783 list_del(&cl->list);
784 free_cluster_node(cl);
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700785 };
786
Maulik Shah4519cc32018-05-11 10:21:22 +0530787 list_for_each_entry_safe(cpu, n, &cluster->cpu, list)
Mahesh Sivasubramanianf11913b2017-11-28 10:06:17 -0700788 list_del(&cpu->list);
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700789}
790
/*
 * TODO:
 * Expects a CPU or a cluster only. This ensures that affinity
 * level of a cluster is consistent with reference to its
 * child nodes.
 */
/*
 * Recursively parse a qcom,pm-cluster DT node into a struct lpm_cluster
 * tree. Returns the cluster, ERR_PTR(-ENOMEM) when the top allocation
 * fails, or NULL on any parse failure (callers must handle both).
 */
struct lpm_cluster *parse_cluster(struct device_node *node,
		struct lpm_cluster *parent)
{
	struct lpm_cluster *c;
	struct device_node *n;
	char *key;
	int ret = 0;
	int i, j;

	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	ret = parse_cluster_params(node, c);
	if (ret)
		goto failed_parse_params;

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->child);
	INIT_LIST_HEAD(&c->cpu);
	c->parent = parent;
	spin_lock_init(&c->sync_lock);
	c->min_child_level = NR_LPM_LEVELS;

	/* Children may be: a cluster level, a nested cluster, or a cpu set.
	 * NOTE(review): the of_node_put(n) calls below run in addition to
	 * the reference handling for_each_child_of_node() performs when it
	 * advances — confirm the refcount balance on the continue paths. */
	for_each_child_of_node(node, n) {

		if (!n->name)
			continue;

		key = "qcom,pm-cluster-level";
		if (!of_node_cmp(n->name, key)) {
			if (parse_cluster_level(n, c)) {
				of_node_put(n);
				goto failed_parse_cluster;
			}
			of_node_put(n);
			continue;
		}

		key = "qcom,pm-cluster";
		if (!of_node_cmp(n->name, key)) {
			struct lpm_cluster *child;

			child = parse_cluster(n, c);
			if (!child) {
				of_node_put(n);
				goto failed_parse_cluster;
			}

			list_add(&child->list, &c->child);
			cpumask_or(&c->child_cpus, &c->child_cpus,
					&child->child_cpus);
			/* A cluster sits one affinity level above its child. */
			c->aff_level = child->aff_level + 1;
			of_node_put(n);
			continue;
		}

		key = "qcom,pm-cpu";
		if (!of_node_cmp(n->name, key)) {
			if (parse_cpu_levels(n, c)) {
				of_node_put(n);
				goto failed_parse_cluster;
			}

			/* A cluster holding cpus directly is affinity level 1. */
			c->aff_level = 1;
			of_node_put(n);
		}
	}

	/* Start at the default level if any of our cpus is online. */
	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
		c->last_level = c->default_level;
	else
		c->last_level = c->nlevels-1;

	/* residencies[j] = break-even time for moving from level i to j;
	 * zero for j <= i. */
	for (i = 0; i < c->nlevels; i++) {
		for (j = 0; j < c->nlevels; j++) {
			if (i >= j) {
				c->levels[i].pwr.residencies[j] = 0;
				continue;
			}
			c->levels[i].pwr.residencies[j] = calculate_residency(
				&c->levels[i].pwr, &c->levels[j].pwr);
		}
	}
	set_optimum_cluster_residency(c, true);
	return c;

failed_parse_cluster:
	pr_err("Failed parse cluster:%s\n", key);
	/* list_del on a self-linked (never-added) node is harmless here. */
	if (parent)
		list_del(&c->list);
	free_cluster_node(c);
failed_parse_params:
	pr_err("Failed parse params\n");
	return NULL;
}
893struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
894{
895 struct device_node *top = NULL;
Maulik Shah598a1722018-05-24 15:52:38 +0530896 struct lpm_cluster *c;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700897
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700898 top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
899 if (!top) {
900 pr_err("Failed to find root node\n");
901 return ERR_PTR(-ENODEV);
902 }
903
904 lpm_pdev = pdev;
Maulik Shah598a1722018-05-24 15:52:38 +0530905 c = parse_cluster(top, NULL);
906 of_node_put(top);
907 return c;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700908}
909
/*
 * Debug helper: recursively print the parsed cluster/cpu level
 * hierarchy. The static `id` tracks recursion depth to build the indent
 * string, which makes this function non-reentrant (probe/debug use only).
 */
void cluster_dt_walkthrough(struct lpm_cluster *cluster)
{
	struct list_head *list;
	struct lpm_cpu *cpu;
	int i, j;
	static int id;
	char str[10] = {0};

	if (!cluster)
		return;

	/* One tab per recursion depth (capped by the 10-byte buffer). */
	for (i = 0; i < id; i++)
		snprintf(str+i, 10 - i, "\t");
	pr_info("%d\n", __LINE__);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *l = &cluster->levels[i];
		pr_info("cluster: %s \t level: %s\n", cluster->cluster_name,
				l->level_name);
	}

	list_for_each_entry(cpu, &cluster->cpu, list) {
		pr_info("%d\n", __LINE__);
		for (j = 0; j < cpu->nlevels; j++)
			pr_info("%s\tCPU level name: %s\n", str,
					cpu->levels[j].name);
	}

	id++;

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		pr_info("%d\n", __LINE__);
		n = list_entry(list, typeof(*n), list);
		cluster_dt_walkthrough(n);
	}
	id--;
}