/*
 * The idle loop for all SuperH platforms.
 *
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/smp.h>

void (*pm_idle)(void);

static int hlt_counter;

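/*
 * "hlt" and "nohlt" on the kernel command line select whether an idle
 * CPU may enter its low-power sleep state or must busy-poll instead.
 */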
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);

static inline int hlt_works(void)
{
	return !hlt_counter;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll need_resched() instead of waiting for the cross-CPU IPI
 * to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

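		/*
		 * With TIF_POLLING_NRFLAG cleared (made visible by the
		 * barrier above), other CPUs must send an IPI to wake us.
		 * Setting the SuperH SR.BL bit blocks interrupt delivery,
		 * so no wakeup can slip in between the need_resched()
		 * check below and the actual cpu_sleep().
		 */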
		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody
 * to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
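		/*
		 * Stop the periodic tick and let RCU know this CPU is
		 * entering an extended quiescent state while idle.
		 */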
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

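			/* Park CPUs that have been taken down via hotplug. */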
			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
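			/*
			 * Prefer the registered cpuidle driver; if
			 * cpuidle_idle_call() fails (no driver), fall
			 * back to the architecture's pm_idle routine.
			 */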
			if (cpuidle_idle_call())
				pm_idle();
			/*
			 * Sanity check to ensure that pm_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void __init select_idle_routine(void)
{
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (pm_idle)
		return;

	if (hlt_works())
		pm_idle = default_idle;
	else
		pm_idle = poll_idle;
}

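/*
 * Dummy IPI target for cpu_idle_wait() below; the interrupt itself is
 * what kicks each CPU out of its current idle routine.
 */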
static void do_nothing(void *unused)
{
}

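/*
 * Park a CPU: mark it offline and put it to sleep permanently,
 * typically from the SMP stop path on shutdown or panic.
 */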
void stop_this_cpu(void *unused)
{
	local_irq_disable();
	set_cpu_online(smp_processor_id(), false);

	for (;;)
		cpu_sleep();
}

/*
 * cpu_idle_wait - Ensure that all CPUs discard the old value of pm_idle
 * and pick up the new one. Required when changing the pm_idle handler
 * on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
void cpu_idle_wait(void)
{
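	/*
	 * Order the caller's pm_idle update before the IPIs below, so
	 * that every CPU re-entering the idle loop sees the new value.
	 */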
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);