Thomas Gleixner | d2912cb | 2019-06-04 10:11:33 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * drivers/cpufreq/cpufreq_stats.c |
| 4 | * |
| 5 | * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. |
Dave Jones | 0a829c5 | 2009-01-18 01:49:04 -0500 | [diff] [blame] | 6 | * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7 | */ |
| 8 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | #include <linux/cpu.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | #include <linux/cpufreq.h> |
Paul Gortmaker | 5c720d37 | 2011-05-27 13:23:32 -0400 | [diff] [blame] | 11 | #include <linux/module.h> |
Viresh Kumar | 7854c75 | 2020-11-17 17:02:10 +0530 | [diff] [blame] | 12 | #include <linux/sched/clock.h> |
Viresh Kumar | 5ff0a26 | 2013-08-06 22:53:03 +0530 | [diff] [blame] | 13 | #include <linux/slab.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | |
/*
 * Per-policy frequency transition statistics.
 *
 * All timestamps are in nanoseconds as returned by local_clock().  The
 * three tables (time_in_state, freq_table, trans_table) are carved out of
 * a single allocation owned by time_in_state — see
 * cpufreq_stats_create_table()/cpufreq_stats_free_table().
 */
struct cpufreq_stats {
	/* Total number of frequency transitions recorded. */
	unsigned int total_trans;
	/* local_clock() timestamp of the last statistics update. */
	unsigned long long last_time;
	/* Number of slots allocated in the tables below. */
	unsigned int max_state;
	/* Number of distinct valid frequencies actually in use (<= max_state). */
	unsigned int state_num;
	/* Index (into freq_table) of the frequency currently in effect. */
	unsigned int last_index;
	/* Per-frequency residency, in ns; indexed like freq_table. */
	u64 *time_in_state;
	/* Distinct valid frequencies, in freq-table order. */
	unsigned int *freq_table;
	/* max_state x max_state matrix of from->to transition counts. */
	unsigned int *trans_table;

	/* Deferred reset */
	/* Non-zero when a reset was requested but not yet applied. */
	unsigned int reset_pending;
	/* local_clock() timestamp at which the reset was requested. */
	unsigned long long reset_time;
};
| 29 | |
Viresh Kumar | 40c3bd4 | 2020-10-05 13:26:01 +0530 | [diff] [blame] | 30 | static void cpufreq_stats_update(struct cpufreq_stats *stats, |
| 31 | unsigned long long time) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 | { |
Viresh Kumar | 7854c75 | 2020-11-17 17:02:10 +0530 | [diff] [blame] | 33 | unsigned long long cur_time = local_clock(); |
Venkatesh Pallipadi | 58f1df2 | 2005-05-25 14:46:50 -0700 | [diff] [blame] | 34 | |
Viresh Kumar | 40c3bd4 | 2020-10-05 13:26:01 +0530 | [diff] [blame] | 35 | stats->time_in_state[stats->last_index] += cur_time - time; |
Viresh Kumar | 5094160 | 2015-01-06 21:09:07 +0530 | [diff] [blame] | 36 | stats->last_time = cur_time; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 37 | } |
| 38 | |
/*
 * Apply a deferred reset requested via store_reset().  Called from
 * cpufreq_stats_record_transition() when reset_pending is set, so it runs
 * in the transition path rather than racing with it.
 */
static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
{
	unsigned int count = stats->max_state;

	/* Zero the residency and transition tables in the shared allocation. */
	memset(stats->time_in_state, 0, count * sizeof(u64));
	memset(stats->trans_table, 0, count * count * sizeof(int));
	stats->last_time = local_clock();
	stats->total_trans = 0;

	/* Adjust for the time elapsed since reset was requested */
	WRITE_ONCE(stats->reset_pending, 0);
	/*
	 * Prevent the reset_time read from being reordered before the
	 * reset_pending accesses in cpufreq_stats_record_transition().
	 */
	smp_rmb();
	/* Credit the current state with the time elapsed since the request. */
	cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
}
| 57 | |
Dave Jones | 0a829c5 | 2009-01-18 01:49:04 -0500 | [diff] [blame] | 58 | static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 59 | { |
Viresh Kumar | 40c3bd4 | 2020-10-05 13:26:01 +0530 | [diff] [blame] | 60 | struct cpufreq_stats *stats = policy->stats; |
| 61 | |
| 62 | if (READ_ONCE(stats->reset_pending)) |
| 63 | return sprintf(buf, "%d\n", 0); |
| 64 | else |
Viresh Kumar | b7af608 | 2020-10-12 10:20:07 +0530 | [diff] [blame] | 65 | return sprintf(buf, "%u\n", stats->total_trans); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 66 | } |
Viresh Kumar | 10b8182 | 2019-02-01 11:45:44 +0530 | [diff] [blame] | 67 | cpufreq_freq_attr_ro(total_trans); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 68 | |
/*
 * sysfs "time_in_state" read: one "<freq> <time>" line per state, with
 * time reported in clock_t units.  While a deferred reset is pending,
 * every state reads as 0 except the current one, which shows only the
 * time elapsed since the reset was requested.
 */
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	unsigned long long time;
	ssize_t len = 0;
	int i;

	for (i = 0; i < stats->state_num; i++) {
		if (pending) {
			if (i == stats->last_index) {
				/*
				 * Prevent the reset_time read from occurring
				 * before the reset_pending read above.
				 */
				smp_rmb();
				time = local_clock() - READ_ONCE(stats->reset_time);
			} else {
				time = 0;
			}
		} else {
			time = stats->time_in_state[i];
			/* Include the still-accumulating current interval. */
			if (i == stats->last_index)
				time += local_clock() - stats->last_time;
		}

		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
			       nsec_to_clock_t(time));
	}
	return len;
}
cpufreq_freq_attr_ro(time_in_state);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 101 | |
/* We don't care what is written to the attribute */
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
			   size_t count)
{
	struct cpufreq_stats *stats = policy->stats;

	/*
	 * Defer resetting of stats to cpufreq_stats_record_transition() to
	 * avoid races.
	 */
	WRITE_ONCE(stats->reset_time, local_clock());
	/*
	 * The memory barrier below is to prevent the readers of reset_time from
	 * seeing a stale or partially updated value.
	 */
	smp_wmb();
	/* Publish the request only after reset_time is visible. */
	WRITE_ONCE(stats->reset_pending, 1);

	return count;
}
cpufreq_freq_attr_wo(reset);
Markus Mayer | ee7930e | 2016-11-07 10:02:23 -0800 | [diff] [blame] | 123 | |
/*
 * sysfs "trans_table" read: the from->to transition-count matrix with a
 * frequency header row/column.  Output is clamped to one page; if the
 * table cannot fit, -EFBIG is returned and a warning is printed once.
 * While a deferred reset is pending all counts read as 0.
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	ssize_t len = 0;
	int i, j, count;

	len += scnprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += scnprintf(buf + len, PAGE_SIZE - len, "         : ");
	/* Header row: destination frequencies. */
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stats->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");

	/* One row per source frequency. */
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stats->freq_table[i]);

		for (j = 0; j < stats->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;

			if (pending)
				count = 0;
			else
				count = stats->trans_table[i * stats->max_state + j];

			len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
		}
		if (len >= PAGE_SIZE)
			break;
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}

	if (len >= PAGE_SIZE) {
		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
		return -EFBIG;
	}
	return len;
}
cpufreq_freq_attr_ro(trans_table);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 174 | |
/* Attributes exposed under <policy>/stats/ in sysfs. */
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
	&reset.attr,
	&trans_table.attr,
	NULL
};
static const struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
| 186 | |
Viresh Kumar | 5094160 | 2015-01-06 21:09:07 +0530 | [diff] [blame] | 187 | static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 188 | { |
| 189 | int index; |
Viresh Kumar | 5094160 | 2015-01-06 21:09:07 +0530 | [diff] [blame] | 190 | for (index = 0; index < stats->max_state; index++) |
| 191 | if (stats->freq_table[index] == freq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 192 | return index; |
| 193 | return -1; |
| 194 | } |
| 195 | |
/*
 * Tear down the stats for @policy: remove the sysfs group and release the
 * tables.  freq_table and trans_table live inside the time_in_state
 * allocation, so freeing time_in_state releases all three.
 */
void cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stats = policy->stats;

	/* Already freed */
	if (!stats)
		return;

	pr_debug("%s: Free stats table\n", __func__);

	sysfs_remove_group(&policy->kobj, &stats_attr_group);
	kfree(stats->time_in_state);
	kfree(stats);
	policy->stats = NULL;
}
| 211 | |
/*
 * Allocate and initialize transition statistics for @policy and create the
 * sysfs "stats" group.  Failures are silent (stats are best-effort);
 * policy->stats stays NULL on error.
 */
void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
	unsigned int i = 0, count;
	struct cpufreq_stats *stats;
	unsigned int alloc_size;
	struct cpufreq_frequency_table *pos;

	count = cpufreq_table_count_valid_entries(policy);
	if (!count)
		return;

	/* stats already initialized */
	if (policy->stats)
		return;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return;

	/* freq_table (count ints) + time_in_state (count u64s) ... */
	alloc_size = count * sizeof(int) + count * sizeof(u64);

	/* ... + trans_table (count x count ints). */
	alloc_size += count * count * sizeof(int);

	/* Allocate memory for time_in_state/freq_table/trans_table in one go */
	stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stats->time_in_state)
		goto free_stat;

	/* Carve freq_table and trans_table out of the single allocation. */
	stats->freq_table = (unsigned int *)(stats->time_in_state + count);

	stats->trans_table = stats->freq_table + count;

	stats->max_state = count;

	/* Find valid-unique entries */
	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		if (freq_table_get_index(stats, pos->frequency) == -1)
			stats->freq_table[i++] = pos->frequency;

	stats->state_num = i;
	stats->last_time = local_clock();
	stats->last_index = freq_table_get_index(stats, policy->cur);

	policy->stats = stats;
	if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
		return;

	/* We failed, release resources */
	policy->stats = NULL;
	kfree(stats->time_in_state);
free_stat:
	kfree(stats);
}
| 265 | |
/*
 * Record a frequency change to @new_freq for @policy: close out the time
 * accounted to the previous frequency and bump the transition counters.
 * Also the point where a deferred reset (store_reset()) is applied.
 */
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq)
{
	struct cpufreq_stats *stats = policy->stats;
	int old_index, new_index;

	/* Stats allocation may have failed; nothing to record then. */
	if (unlikely(!stats))
		return;

	if (unlikely(READ_ONCE(stats->reset_pending)))
		cpufreq_stats_reset_table(stats);

	old_index = stats->last_index;
	new_index = freq_table_get_index(stats, new_freq);

	/* We can't do stats->time_in_state[-1]= .. */
	if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
		return;

	/* Close the interval spent at the old frequency. */
	cpufreq_stats_update(stats, stats->last_time);

	stats->last_index = new_index;
	stats->trans_table[old_index * stats->max_state + new_index]++;
	stats->total_trans++;
}