#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(dst, src);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

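/*
 * Free the extra xstate area when a task_struct is released.
 */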
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

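/*
 * Create the slab cache used for the per-task extended FPU/xstate area.
 */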
void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	drop_fpu(me);
}

void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
	       current->pid, current->comm, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version,
	       vendor, product,
	       board ? "/" : "",
	       board ? board : "");
}

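/*
 * Clear the current task's hardware breakpoints, TLS entries and FPU state.
 */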
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	drop_init_fpu(tsk);
	/*
	 * Free the FPU state for non xsave platforms. They get reallocated
	 * lazily at the first use.
	 */
	if (!use_eager_fpu())
		free_thread_xstate(tsk);
}

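/*
 * CR4.TSD makes RDTSC a privileged instruction; the helpers below set or
 * clear it to deny or allow TSC reads from user space on this CPU.
 */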
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

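/*
 * Get/set the TSC access mode of the current task: PR_TSC_ENABLE allows
 * RDTSC, PR_TSC_SIGSEGV makes user-space RDTSC fault.
 */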
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

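/*
 * Propagate thread flags that differ between the outgoing and incoming
 * task: block stepping (BTF), TSC access, the I/O permission bitmap and
 * user-return notifiers.
 */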
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();

			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better
 * idle routine.
 */
void default_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();

	if (!need_resched())
		safe_halt();	/* enables interrupts racelessly */
	else
		local_irq_enable();
	current_thread_info()->status |= TS_POLLING;
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

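/*
 * Switch pm_idle to default_idle; returns true if another idle routine
 * had been installed before.
 */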
bool set_pm_idle_to_default(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}
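
/*
 * Take this CPU down for good: mark it offline, disable the local APIC
 * and loop forever.
 */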
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Power savings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Use mwait if idle=mwait boot option is given */
	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;

	/*
	 * Any idle= boot option other than idle=mwait means that we must not
	 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
	 */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return 0;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (need_resched())
		return;

	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

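/*
 * Choose the idle routine: mwait if usable, the E400-aware routine on
 * affected AMD CPUs, otherwise default_idle. Bails out early if pm_idle
 * was already set (e.g. by an "idle=" boot option).
 */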
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => All CPUs support mwait
		 */
		pr_info("using mwait in idle threads\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

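/* Handle the "idle=" boot parameter: poll, mwait, halt or nomwait. */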
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "mwait")) {
		boot_option_idle_override = IDLE_FORCE_MWAIT;
		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

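/*
 * Apply up to 8 kB of random offset to the stack pointer (unless
 * randomization is disabled) and align it down to 16 bytes.
 */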
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

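/* Pick a randomized brk value within 32 MB above the current brk. */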
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}