/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/include/linux/cpufreq.h
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 */
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>

/*********************************************************************
 *                        CPUFREQ INTERFACE                          *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */

#define CPUFREQ_ETERNAL			(-1)
#define CPUFREQ_NAME_LEN		16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN		(CPUFREQ_NAME_LEN + 1)

struct cpufreq_governor;

enum cpufreq_table_sorting {
	CPUFREQ_TABLE_UNSORTED,
	CPUFREQ_TABLE_SORTED_ASCENDING,
	CPUFREQ_TABLE_SORTED_DESCENDING
};

struct cpufreq_cpuinfo {
	unsigned int		max_freq;
	unsigned int		min_freq;

	/* in 10^(-9) s = nanoseconds */
	unsigned int		transition_latency;
};

struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t		cpus;	/* Online CPUs only */
	cpumask_var_t		related_cpus; /* Online + Offline CPUs */
	cpumask_var_t		real_cpus; /* Related and present */

	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
						should set cpufreq */
	unsigned int		cpu;    /* cpu managing this policy, must be online */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo;/* see above */

	unsigned int		min;    /* in kHz */
	unsigned int		max;    /* in kHz */
	unsigned int		cur;    /* in kHz, only needed if cpufreq
					 * governors are used */
	unsigned int		restore_freq; /* = policy->cur before transition */
	unsigned int		suspend_freq; /* freq to set during suspend */

	unsigned int		policy; /* see above */
	unsigned int		last_policy; /* policy before unplug */
	struct cpufreq_governor	*governor; /* see below */
	void			*governor_data;
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update; /* if update_policy() needs to be
					 * called, but you're in IRQ context */

	struct freq_constraints	constraints;
	struct freq_qos_request	*min_freq_req;
	struct freq_qos_request	*max_freq_req;

	struct cpufreq_frequency_table	*freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head	policy_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may take away
	 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
	 *   mode before doing so.
	 */
	struct rw_semaphore	rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible should be set by the driver if it can
	 *   guarantee that frequency can be changed on any CPU sharing the
	 *   policy and that the change will affect all of the policy CPUs then.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of cpufreq_enable_fast_switch().
	 */
	bool			fast_switch_possible;
	bool			fast_switch_enabled;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy. To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int		transition_delay_us;

	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't want
	 * to access another structure from scheduler hotpath).
	 *
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool			dvfs_possible_from_any_cpu;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int		cached_target_freq;
	unsigned int		cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool			transition_ongoing; /* Tracks transition status */
	spinlock_t		transition_lock;
	wait_queue_head_t	transition_wait;
	struct task_struct	*transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats	*stats;

	/* For cpufreq driver's internal use */
	void			*driver_data;

	/* Pointer to the cooling device if used for thermal mitigation */
	struct thermal_cooling_device *cdev;

	struct notifier_block	nb_min;
	struct notifier_block	nb_max;
};
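
/*
 * Illustrative sketch (not part of this header): code outside the cpufreq
 * core that inspects a policy is expected to follow the rwsem rules
 * documented above, roughly:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		down_read(&policy->rwsem);
 *		pr_info("cpu%u: %u kHz\n", policy->cpu, policy->cur);
 *		up_read(&policy->rwsem);
 *		cpufreq_cpu_put(policy);
 *	}
 */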

/*
 * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
 * callback for sanitization. That callback is only expected to modify the min
 * and max values, if necessary, and specifically it must not update the
 * frequency table.
 */
struct cpufreq_policy_data {
	struct cpufreq_cpuinfo		cpuinfo;
	struct cpufreq_frequency_table	*freq_table;
	unsigned int			cpu;
	unsigned int			min;    /* in kHz */
	unsigned int			max;    /* in kHz */
};

struct cpufreq_freqs {
	struct cpufreq_policy *policy;
	unsigned int old;
	unsigned int new;
	u8 flags;		/* flags of cpufreq_driver, see below. */
};

/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU */

#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}

#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);

struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	return 0;
}
static inline bool cpufreq_supports_freq_invariance(void)
{
	return false;
}
static inline void disable_cpufreq(void) { }
#endif

#ifdef CONFIG_CPU_FREQ_STAT
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq);
#else
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
#endif /* CONFIG_CPU_FREQ_STAT */

/*********************************************************************
 *                      CPUFREQ DRIVER INTERFACE                     *
 *********************************************************************/

#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2  /* closest frequency to target */

struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};

#define cpufreq_freq_attr_ro(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =			\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

#define cpufreq_freq_attr_wo(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0200, NULL, store_##_name)

#define define_one_global_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)
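
/*
 * Illustrative sketch: a read-only per-policy attribute built with the
 * helpers above (the name "example_cur_khz" and its show routine are
 * hypothetical, not part of this header):
 *
 *	static ssize_t show_example_cur_khz(struct cpufreq_policy *policy,
 *					    char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 *	cpufreq_freq_attr_ro(example_cur_khz);
 *
 * A pointer to &example_cur_khz can then be placed in the NULL-terminated
 * cpufreq_driver->attr array so the core creates the sysfs file.
 */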


struct cpufreq_driver {
	char		name[CPUFREQ_NAME_LEN];
	u16		flags;
	void		*driver_data;

	/* needed by all drivers */
	int		(*init)(struct cpufreq_policy *policy);
	int		(*verify)(struct cpufreq_policy_data *policy);

	/* define one out of two */
	int		(*setpolicy)(struct cpufreq_policy *policy);

	/*
	 * On failure, should always restore frequency to policy->restore_freq
	 * (i.e. old freq).
	 */
	int		(*target)(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation);	/* Deprecated */
	int		(*target_index)(struct cpufreq_policy *policy,
					unsigned int index);
	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
				       unsigned int target_freq);

	/*
	 * Caches and returns the lowest driver-supported frequency greater than
	 * or equal to the target frequency, subject to any driver limitations.
	 * Does not set the frequency. Only to be implemented for drivers with
	 * target().
	 */
	unsigned int	(*resolve_freq)(struct cpufreq_policy *policy,
					unsigned int target_freq);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set CPU
	 * to that frequency, before jumping to the frequency corresponding
	 * to 'index'. Core will take care of sending notifications and driver
	 * doesn't have to handle them in target_intermediate() or
	 * target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int	(*get_intermediate)(struct cpufreq_policy *policy,
					    unsigned int index);
	int		(*target_intermediate)(struct cpufreq_policy *policy,
					       unsigned int index);

	/* should be defined, if possible */
	unsigned int	(*get)(unsigned int cpu);

	/* Called to update policy limits on firmware notifications. */
	void		(*update_limits)(unsigned int cpu);

	/* optional */
	int		(*bios_limit)(int cpu, unsigned int *limit);

	int		(*online)(struct cpufreq_policy *policy);
	int		(*offline)(struct cpufreq_policy *policy);
	int		(*exit)(struct cpufreq_policy *policy);
	void		(*stop_cpu)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */
	bool		boost_enabled;
	int		(*set_boost)(struct cpufreq_policy *policy, int state);
};

/* flags */

/* driver isn't removed even if all ->init() calls failed */
#define CPUFREQ_STICKY				BIT(0)

/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS			BIT(1)

/* don't warn on suspend/resume speed mismatches */
#define CPUFREQ_PM_NO_WARN			BIT(2)

/*
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this, per-policy sysfs directories for
 * the governor are created in cpu/cpu<num>/cpufreq/, so different clusters
 * can use the same governor with different tunables.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY	BIT(3)

/*
 * Drivers that do POSTCHANGE notifications from outside of their ->target()
 * routine must set this flag in cpufreq_driver->flags so that the core can
 * handle them specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION		BIT(4)

/*
 * Set by drivers which want cpufreq core to check if CPU is running at a
 * frequency present in freq-table exposed by the driver. For these drivers if
 * CPU is found running at an out of table freq, we will try to set it to a freq
 * from the table. And if that fails, we will stop further boot process by
 * issuing a BUG_ON().
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	BIT(5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	BIT(6)

/*
 * Set by drivers that want the core to automatically register the cpufreq
 * driver as a thermal cooling device.
 */
#define CPUFREQ_IS_COOLING_DEV			BIT(7)

/*
 * Set by drivers that need to update internal upper and lower boundaries along
 * with the target frequency and so the core and governors should also invoke
 * the driver if the target frequency does not change, but the policy min or max
 * may have changed.
 */
#define CPUFREQ_NEED_UPDATE_LIMITS		BIT(8)

int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
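
/*
 * Illustrative sketch of driver registration (the "example" names are
 * hypothetical; a real driver fills in more callbacks as described above):
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name		= "example",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.init		= example_cpu_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *	};
 *
 *	ret = cpufreq_register_driver(&example_driver);
 *
 * cpufreq_unregister_driver() undoes this, typically from module exit.
 */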

bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);

static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
{
	return IS_ENABLED(CONFIG_CPU_THERMAL) &&
		(drv->flags & CPUFREQ_IS_COOLING_DEV);
}

static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
						unsigned int min,
						unsigned int max)
{
	if (policy->min < min)
		policy->min = min;
	if (policy->max < min)
		policy->max = min;
	if (policy->min > max)
		policy->min = max;
	if (policy->max > max)
		policy->max = max;
	if (policy->min > policy->max)
		policy->min = policy->max;
	return;
}

static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}
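
/*
 * Illustrative sketch of a driver ->verify() callback built on the helper
 * above (the function name is hypothetical):
 *
 *	static int example_verify(struct cpufreq_policy_data *policy)
 *	{
 *		cpufreq_verify_within_cpu_limits(policy);
 *		return 0;
 *	}
 *
 * Drivers with a frequency table can use
 * cpufreq_generic_frequency_table_verify() (declared below) instead.
 */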

#ifdef CONFIG_CPU_FREQ
void cpufreq_suspend(void);
void cpufreq_resume(void);
int cpufreq_generic_suspend(struct cpufreq_policy *policy);
#else
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
#endif

/*********************************************************************
 *                     CPUFREQ NOTIFIER INTERFACE                    *
 *********************************************************************/

#define CPUFREQ_TRANSITION_NOTIFIER	(0)
#define CPUFREQ_POLICY_NOTIFIER		(1)

/* Transition notifiers */
#define CPUFREQ_PRECHANGE		(0)
#define CPUFREQ_POSTCHANGE		(1)

/* Policy Notifiers */
#define CPUFREQ_CREATE_POLICY		(0)
#define CPUFREQ_REMOVE_POLICY		(1)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed);

#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
#endif /* !CONFIG_CPU_FREQ */

/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old:  old value
 * @div:  divisor
 * @mult: multiplier
 *
 *
 *	new = old * mult / div
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
		u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
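
/*
 * Example: rescaling a frequency-dependent constant such as loops_per_jiffy
 * when switching from ref_freq to new_freq (variable names are illustrative):
 *
 *	lpj = cpufreq_scale(lpj_ref, ref_freq, new_freq);
 *
 * i.e. lpj_ref * new_freq / ref_freq, computed without 32-bit overflow.
 */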

/*********************************************************************
 *                          CPUFREQ GOVERNORS                        *
 *********************************************************************/

#define CPUFREQ_POLICY_UNKNOWN		(0)
/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE	(1)
#define CPUFREQ_POLICY_PERFORMANCE	(2)

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * ondemand governor will work on any processor with transition latency <= 10ms,
 * using appropriate sampling rate.
 */
#define LATENCY_MULTIPLIER		(1000)

struct cpufreq_governor {
	char	name[CPUFREQ_NAME_LEN];
	int	(*init)(struct cpufreq_policy *policy);
	void	(*exit)(struct cpufreq_policy *policy);
	int	(*start)(struct cpufreq_policy *policy);
	void	(*stop)(struct cpufreq_policy *policy);
	void	(*limits)(struct cpufreq_policy *policy);
	ssize_t	(*show_setspeed)	(struct cpufreq_policy *policy,
					 char *buf);
	int	(*store_setspeed)	(struct cpufreq_policy *policy,
					 unsigned int freq);
	struct list_head	governor_list;
	struct module		*owner;
	u8			flags;
};

/* Governor flags */

/* For governors which change frequency dynamically by themselves */
#define CPUFREQ_GOV_DYNAMIC_SWITCHING	BIT(0)

/* For governors wanting the target frequency to be set exactly */
#define CPUFREQ_GOV_STRICT_TARGET	BIT(1)


/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
int cpufreq_start_governor(struct cpufreq_policy *policy);
void cpufreq_stop_governor(struct cpufreq_policy *policy);

#define cpufreq_governor_init(__governor)			\
static int __init __governor##_init(void)			\
{								\
	return cpufreq_register_governor(&__governor);		\
}								\
core_initcall(__governor##_init)

#define cpufreq_governor_exit(__governor)			\
static void __exit __governor##_exit(void)			\
{								\
	return cpufreq_unregister_governor(&__governor);	\
}								\
module_exit(__governor##_exit)
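
/*
 * Illustrative sketch of a minimal governor registered with the macros
 * above (the "example" names are hypothetical; only ->limits is implemented
 * here for brevity, in the style of the performance governor):
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.limits	= example_limits,
 *	};
 *
 *	cpufreq_governor_init(example_governor);
 *	cpufreq_governor_exit(example_governor);
 */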

struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);

static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}

/* Governor attribute set */
struct gov_attr_set {
	struct kobject kobj;
	struct list_head policy_list;
	struct mutex update_lock;
	int usage_count;
};

/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);

/* Governor sysfs attribute */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/* Special Values of .frequency field */
#define CPUFREQ_ENTRY_INVALID	~0u
#define CPUFREQ_TABLE_END	~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ	(1 << 0)

struct cpufreq_frequency_table {
	unsigned int	flags;
	unsigned int	driver_data; /* driver specific data, not used by core */
	unsigned int	frequency; /* kHz - doesn't need to be in ascending
				    * order */
};

#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}

static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
#endif
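
/*
 * Illustrative sketch: a driver ->init() implementation may build its
 * frequency table from the OPP library using the helper above (error
 * handling trimmed, cpu_dev is a hypothetical struct device pointer):
 *
 *	struct cpufreq_frequency_table *table;
 *	int ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &table);
 *
 *	if (!ret)
 *		policy->freq_table = table;
 *
 * dev_pm_opp_free_cpufreq_table() releases the table again in ->exit().
 */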

/*
 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
 *	with index
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed
 */

#define cpufreq_for_each_entry_idx(pos, table, idx)	\
	for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
		pos++, idx++)

/*
 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
 *	excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_valid_entry(pos, table)			\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
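
/*
 * Illustrative sketch: counting the usable entries in a driver's frequency
 * table with the iterator above:
 *
 *	struct cpufreq_frequency_table *pos;
 *	unsigned int count = 0;
 *
 *	cpufreq_for_each_valid_entry(pos, policy->freq_table)
 *		count++;
 */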
Stratos Karafotis | 27e289d | 2014-04-25 23:15:23 +0300 | [diff] [blame] | 722 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 723 | /* |
| 724 | * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq |
| 725 | * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies. |
| 726 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. |
| 727 | * @table: the cpufreq_frequency_table * to iterate over. |
| 728 | * @idx: the table entry currently being processed |
| 729 | */ |
| 730 | |
| 731 | #define cpufreq_for_each_valid_entry_idx(pos, table, idx) \ |
| 732 | cpufreq_for_each_entry_idx(pos, table, idx) \ |
| 733 | if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ |
| 734 | continue; \ |
| 735 | else |
| 736 | |
| 737 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 738 | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, |
| 739 | struct cpufreq_frequency_table *table); |
| 740 | |
Rafael J. Wysocki | 1e4f63a | 2020-01-26 23:40:11 +0100 | [diff] [blame] | 741 | int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 742 | struct cpufreq_frequency_table *table); |
Rafael J. Wysocki | 1e4f63a | 2020-01-26 23:40:11 +0100 | [diff] [blame] | 743 | int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 744 | |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 745 | int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, |
| 746 | unsigned int target_freq, |
| 747 | unsigned int relation); |
Viresh Kumar | d391669 | 2013-12-03 11:20:46 +0530 | [diff] [blame] | 748 | int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, |
| 749 | unsigned int freq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 750 | |
Viresh Kumar | 74aca95 | 2013-08-06 22:53:04 +0530 | [diff] [blame] | 751 | ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf); |
| 752 | |
Lukasz Majewski | 6f19efc | 2013-12-20 15:24:49 +0100 | [diff] [blame] | 753 | #ifdef CONFIG_CPU_FREQ |
| 754 | int cpufreq_boost_trigger_state(int state); |
Lukasz Majewski | 6f19efc | 2013-12-20 15:24:49 +0100 | [diff] [blame] | 755 | int cpufreq_boost_enabled(void); |
Viresh Kumar | 44139ed | 2015-07-29 16:23:09 +0530 | [diff] [blame] | 756 | int cpufreq_enable_boost_support(void); |
| 757 | bool policy_has_boost_freq(struct cpufreq_policy *policy); |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 758 | |
| 759 | /* Find lowest freq at or above target in a table in ascending order */ |
| 760 | static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, |
| 761 | unsigned int target_freq) |
| 762 | { |
| 763 | struct cpufreq_frequency_table *table = policy->freq_table; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 764 | struct cpufreq_frequency_table *pos; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 765 | unsigned int freq; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 766 | int idx, best = -1; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 767 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 768 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
Aaro Koskinen | 899bb66 | 2016-10-12 08:45:05 +0530 | [diff] [blame] | 769 | freq = pos->frequency; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 770 | |
| 771 | if (freq >= target_freq) |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 772 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 773 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 774 | best = idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 775 | } |
| 776 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 777 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 778 | } |
| 779 | |
| 780 | /* Find lowest freq at or above target in a table in descending order */ |
| 781 | static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, |
| 782 | unsigned int target_freq) |
| 783 | { |
| 784 | struct cpufreq_frequency_table *table = policy->freq_table; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 785 | struct cpufreq_frequency_table *pos; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 786 | unsigned int freq; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 787 | int idx, best = -1; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 788 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 789 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
Aaro Koskinen | 899bb66 | 2016-10-12 08:45:05 +0530 | [diff] [blame] | 790 | freq = pos->frequency; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 791 | |
| 792 | if (freq == target_freq) |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 793 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 794 | |
| 795 | if (freq > target_freq) { |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 796 | best = idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 797 | continue; |
| 798 | } |
| 799 | |
| 800 | /* No freq found above target_freq */ |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 801 | if (best == -1) |
| 802 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 803 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 804 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 805 | } |
| 806 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 807 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 808 | } |
| 809 | |
| 810 | /* Works only on sorted freq-tables */ |
| 811 | static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, |
| 812 | unsigned int target_freq) |
| 813 | { |
| 814 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
| 815 | |
| 816 | if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) |
| 817 | return cpufreq_table_find_index_al(policy, target_freq); |
| 818 | else |
| 819 | return cpufreq_table_find_index_dl(policy, target_freq); |
| 820 | } |
| 821 | |
| 822 | /* Find highest freq at or below target in a table in ascending order */ |
| 823 | static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, |
| 824 | unsigned int target_freq) |
| 825 | { |
| 826 | struct cpufreq_frequency_table *table = policy->freq_table; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 827 | struct cpufreq_frequency_table *pos; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 828 | unsigned int freq; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 829 | int idx, best = -1; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 830 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 831 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
Aaro Koskinen | 899bb66 | 2016-10-12 08:45:05 +0530 | [diff] [blame] | 832 | freq = pos->frequency; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 833 | |
| 834 | if (freq == target_freq) |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 835 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 836 | |
| 837 | if (freq < target_freq) { |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 838 | best = idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 839 | continue; |
| 840 | } |
| 841 | |
| 842 | /* No freq found below target_freq */ |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 843 | if (best == -1) |
| 844 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 845 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 846 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 847 | } |
| 848 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 849 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 850 | } |
| 851 | |
| 852 | /* Find highest freq at or below target in a table in descending order */ |
| 853 | static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, |
| 854 | unsigned int target_freq) |
| 855 | { |
| 856 | struct cpufreq_frequency_table *table = policy->freq_table; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 857 | struct cpufreq_frequency_table *pos; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 858 | unsigned int freq; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 859 | int idx, best = -1; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 860 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 861 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
Aaro Koskinen | 899bb66 | 2016-10-12 08:45:05 +0530 | [diff] [blame] | 862 | freq = pos->frequency; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 863 | |
| 864 | if (freq <= target_freq) |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 865 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 866 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 867 | best = idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 868 | } |
| 869 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 870 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 871 | } |
| 872 | |
| 873 | /* Works only on sorted freq-tables */ |
| 874 | static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, |
| 875 | unsigned int target_freq) |
| 876 | { |
| 877 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
| 878 | |
| 879 | if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) |
| 880 | return cpufreq_table_find_index_ah(policy, target_freq); |
| 881 | else |
| 882 | return cpufreq_table_find_index_dh(policy, target_freq); |
| 883 | } |
| 884 | |
| 885 | /* Find closest freq to target in a table in ascending order */ |
| 886 | static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, |
| 887 | unsigned int target_freq) |
| 888 | { |
| 889 | struct cpufreq_frequency_table *table = policy->freq_table; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 890 | struct cpufreq_frequency_table *pos; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 891 | unsigned int freq; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 892 | int idx, best = -1; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 893 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 894 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
Aaro Koskinen | 899bb66 | 2016-10-12 08:45:05 +0530 | [diff] [blame] | 895 | freq = pos->frequency; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 896 | |
| 897 | if (freq == target_freq) |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 898 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 899 | |
| 900 | if (freq < target_freq) { |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 901 | best = idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 902 | continue; |
| 903 | } |
| 904 | |
| 905 | /* No freq found below target_freq */ |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 906 | if (best == -1) |
| 907 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 908 | |
| 909 | /* Choose the closest freq */ |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 910 | if (target_freq - table[best].frequency > freq - target_freq) |
| 911 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 912 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 913 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 914 | } |
| 915 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 916 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 917 | } |
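/*
 * Tie-break: when target_freq sits exactly halfway between two table
 * frequencies, the strict '>' comparison above falls through to 'best',
 * i.e. the lower of the two frequencies wins in an ascending table.
 */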
| 918 | |
| 919 | /* Find closest freq to target in a table in descending order */ |
| 920 | static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, |
| 921 | unsigned int target_freq) |
| 922 | { |
| 923 | struct cpufreq_frequency_table *table = policy->freq_table; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 924 | struct cpufreq_frequency_table *pos; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 925 | unsigned int freq; |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 926 | int idx, best = -1; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 927 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 928 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
Aaro Koskinen | 899bb66 | 2016-10-12 08:45:05 +0530 | [diff] [blame] | 929 | freq = pos->frequency; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 930 | |
| 931 | if (freq == target_freq) |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 932 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 933 | |
| 934 | if (freq > target_freq) { |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 935 | best = idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 936 | continue; |
| 937 | } |
| 938 | |
| 939 | /* No freq found above target_freq */ |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 940 | if (best == -1) |
| 941 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 942 | |
| 943 | /* Choose the closest freq */ |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 944 | if (table[best].frequency - target_freq > target_freq - freq) |
| 945 | return idx; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 946 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 947 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 948 | } |
| 949 | |
Dominik Brodowski | ffd81dc | 2018-01-30 06:42:37 +0100 | [diff] [blame] | 950 | return best; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 951 | } |
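/*
 * The descending search mirrors the ascending one, but here 'best' tracks
 * the lowest frequency still above the target, so an exact halfway tie
 * resolves to the higher of the two frequencies.
 */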
| 952 | |
| 953 | /* Find the frequency closest to target; works only on sorted freq-tables */
| 954 | static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, |
| 955 | unsigned int target_freq) |
| 956 | { |
| 957 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
| 958 | |
| 959 | if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) |
| 960 | return cpufreq_table_find_index_ac(policy, target_freq); |
| 961 | else |
| 962 | return cpufreq_table_find_index_dc(policy, target_freq); |
| 963 | } |
| 964 | |
| 965 | static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, |
| 966 | unsigned int target_freq, |
| 967 | unsigned int relation) |
| 968 | { |
| 969 | if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) |
| 970 | return cpufreq_table_index_unsorted(policy, target_freq, |
| 971 | relation); |
| 972 | |
| 973 | switch (relation) { |
| 974 | case CPUFREQ_RELATION_L: |
| 975 | return cpufreq_table_find_index_l(policy, target_freq); |
| 976 | case CPUFREQ_RELATION_H: |
| 977 | return cpufreq_table_find_index_h(policy, target_freq); |
| 978 | case CPUFREQ_RELATION_C: |
| 979 | return cpufreq_table_find_index_c(policy, target_freq); |
| 980 | default: |
Viresh Kumar | 30b8e6b | 2020-08-27 10:54:16 +0530 | [diff] [blame] | 981 | WARN_ON_ONCE(1); |
| 982 | return 0; |
Viresh Kumar | da0c6dc | 2016-06-27 16:04:07 +0530 | [diff] [blame] | 983 | } |
| 984 | } |
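/*
 * CPUFREQ_RELATION_L selects the lowest frequency at or above the target,
 * CPUFREQ_RELATION_H the highest frequency at or below it, and
 * CPUFREQ_RELATION_C the one closest to it.  Illustrative use only (the
 * caller is responsible for having a valid, non-empty freq_table):
 *
 *	int idx = cpufreq_frequency_table_target(policy, target_freq,
 *						 CPUFREQ_RELATION_L);
 *	unsigned int new_freq = policy->freq_table[idx].frequency;
 */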
Viresh Kumar | 55d8529 | 2017-04-25 15:57:15 +0530 | [diff] [blame] | 985 | |
| 986 | static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy) |
| 987 | { |
| 988 | struct cpufreq_frequency_table *pos; |
| 989 | int count = 0; |
| 990 | |
| 991 | if (unlikely(!policy->freq_table)) |
| 992 | return 0; |
| 993 | |
| 994 | cpufreq_for_each_valid_entry(pos, policy->freq_table) |
| 995 | count++; |
| 996 | |
| 997 | return count; |
| 998 | } |
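/*
 * Counts only entries whose frequency is not CPUFREQ_ENTRY_INVALID.  Note
 * that a missing table and an empty table both yield 0, so callers cannot
 * tell the two apart from the return value alone.
 */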
Lukasz Majewski | 6f19efc | 2013-12-20 15:24:49 +0100 | [diff] [blame] | 999 | #else |
| 1000 | static inline int cpufreq_boost_trigger_state(int state) |
| 1001 | { |
| 1002 | return 0; |
| 1003 | } |
Lukasz Majewski | 6f19efc | 2013-12-20 15:24:49 +0100 | [diff] [blame] | 1004 | static inline int cpufreq_boost_enabled(void) |
| 1005 | { |
| 1006 | return 0; |
| 1007 | } |
Viresh Kumar | 44139ed | 2015-07-29 16:23:09 +0530 | [diff] [blame] | 1008 | |
| 1009 | static inline int cpufreq_enable_boost_support(void) |
| 1010 | { |
| 1011 | return -EINVAL; |
| 1012 | } |
| 1013 | |
| 1014 | static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) |
| 1015 | { |
| 1016 | return false; |
| 1017 | } |
Lukasz Majewski | 6f19efc | 2013-12-20 15:24:49 +0100 | [diff] [blame] | 1018 | #endif |
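/*
 * The static inline stubs above back the boost API when the real
 * implementations are compiled out: boost is never reported as enabled and
 * any attempt to enable boost support fails with -EINVAL.
 */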
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | |
Quentin Perret | 531b5c9 | 2018-12-03 09:56:21 +0000 | [diff] [blame] | 1020 | #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) |
| 1021 | void sched_cpufreq_governor_change(struct cpufreq_policy *policy, |
| 1022 | struct cpufreq_governor *old_gov); |
| 1023 | #else |
| 1024 | static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy, |
| 1025 | struct cpufreq_governor *old_gov) { } |
| 1026 | #endif |
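/*
 * sched_cpufreq_governor_change() is only backed by a real implementation
 * when both the energy model and the schedutil governor are configured; the
 * empty stub lets callers invoke it unconditionally when a policy switches
 * governor.
 */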
| 1027 | |
Rafael J. Wysocki | 7d5905d | 2017-11-15 02:13:40 +0100 | [diff] [blame] | 1028 | extern void arch_freq_prepare_all(void); |
Len Brown | f8475ce | 2017-06-23 22:11:52 -0700 | [diff] [blame] | 1029 | extern unsigned int arch_freq_get_on_cpu(int cpu); |
| 1030 | |
Ionela Voinescu | a20b705 | 2020-09-24 13:30:15 +0100 | [diff] [blame] | 1031 | #ifndef arch_set_freq_scale |
| 1032 | static __always_inline |
| 1033 | void arch_set_freq_scale(const struct cpumask *cpus, |
| 1034 | unsigned long cur_freq, |
| 1035 | unsigned long max_freq) |
| 1036 | { |
| 1037 | } |
| 1038 | #endif |
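/*
 * Architectures that can supply a frequency-invariance scale factor provide
 * their own arch_set_freq_scale() and define the macro of the same name;
 * this empty fallback keeps generic code building everywhere else.
 */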
Dietmar Eggemann | e7d5459 | 2017-09-26 17:41:07 +0100 | [diff] [blame] | 1039 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1040 | /* the following are really, really optional */
| 1041 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; |
Bartlomiej Zolnierkiewicz | 21c36d3 | 2015-08-07 13:59:16 +0200 | [diff] [blame] | 1042 | extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; |
Viresh Kumar | 1843451 | 2013-10-03 20:27:55 +0530 | [diff] [blame] | 1043 | extern struct freq_attr *cpufreq_generic_attr[]; |
Viresh Kumar | d417e06 | 2018-02-22 11:29:44 +0530 | [diff] [blame] | 1044 | int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); |
Lan Tianyu | f4fd379 | 2013-06-27 15:08:54 +0800 | [diff] [blame] | 1045 | |
Viresh Kumar | 652ed95 | 2014-01-09 20:38:43 +0530 | [diff] [blame] | 1046 | unsigned int cpufreq_generic_get(unsigned int cpu); |
Viresh Kumar | c4dcc8a | 2019-07-16 09:36:08 +0530 | [diff] [blame] | 1047 | void cpufreq_generic_init(struct cpufreq_policy *policy, |
Viresh Kumar | 70e9e77 | 2013-10-03 20:29:07 +0530 | [diff] [blame] | 1048 | struct cpufreq_frequency_table *table, |
| 1049 | unsigned int transition_latency); |
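/*
 * cpufreq_generic_get() and cpufreq_generic_init() are convenience helpers
 * for simple clk/frequency-table based drivers.  A minimal ->init() sketch
 * (illustrative only; foo_freq_table and FOO_LATENCY_NS are driver-provided):
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table, FOO_LATENCY_NS);
 *		return 0;
 *	}
 */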
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | #endif /* _LINUX_CPUFREQ_H */ |