/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;

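/*
 * Read the vendor-specific MSR (MSR_IA32_MISC_ENABLE on Intel, MSR_K7_HWCR
 * on AMD) and report whether core boosting (turbo/CPB) is currently
 * enabled on the given CPU.
 */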
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

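/*
 * Set or clear the boost-disable bit on every CPU in @cpumask by doing a
 * read-modify-write of the per-CPU MSR copies cached in @msrs.
 */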
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

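/*
 * sysfs store handler for the global "boost" attribute: accepts 0 or 1 and
 * toggles the boost-disable bit on all online CPUs.
 */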
static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

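/*
 * extract_io()/extract_msr() translate a raw P-state status value (an I/O
 * port reading or a PERF_STATUS MSR value) back into a frequency from the
 * driver's cpufreq frequency table; extract_freq() dispatches between them.
 */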
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;
	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

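/* Run do_drv_write() on every CPU in cmd->mask, including the calling CPU */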
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

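/*
 * Read the current P-state status value (MSR or I/O port) from one CPU in
 * @mask; the raw value still has to be run through extract_freq().
 */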
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

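/*
 * Poll the hardware (up to 100 times, 10us apart) until the CPUs in @mask
 * report the requested frequency; used only in acpi_pstate_strict mode.
 */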
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

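/*
 * cpufreq ->target() callback: map the requested frequency to a P-state via
 * the frequency table, write the new control value (MSR or I/O port) on the
 * affected CPUs and issue the PRE/POSTCHANGE notifications.
 */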
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	unsigned int i;
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}

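/*
 * Guess the current P-state by picking the table entry whose frequency is
 * closest to the measured cpu_khz; used at init time when the status
 * register cannot be read (SYSTEM_IO case).
 */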
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

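/*
 * CPU hotplug notifier: keep the per-CPU boost-disable bit consistent with
 * the global boost_enabled state as CPUs come and go.
 */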
static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}


static struct notifier_block boost_nb = {
	.notifier_call		= boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

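/*
 * cpufreq ->init() callback: register with the ACPI performance library,
 * work out whether P-state control goes through MSRs or I/O ports, build
 * the frequency table and fill in the policy limits.
 */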
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};

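/*
 * If the CPU advertises boost support (AMD CPB or Intel IDA), sync the
 * boost-disable MSR across all online CPUs, hook CPU hotplug, and expose
 * the global "boost" sysfs file.
 */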
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("acpi");