/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

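/*
 * Non-zero when forced polling is in effect: the idle loop spins in
 * cpu_idle_poll() instead of entering a low-power state. Treated as a
 * count so that nested cpu_idle_poll_ctrl() calls balance out.
 */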
static int __read_mostly cpu_idle_force_poll;

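/**
 * cpu_idle_poll_ctrl - enable or disable forced idle polling
 * @enable: true to increment the forced-poll count, false to decrement it
 *
 * Calls must be balanced; the WARN_ON_ONCE() below fires if a disable
 * arrives without a matching earlier enable.
 */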
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

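/*
 * "nohlt" forces the idle loop to poll, "hlt" restores the default
 * behaviour. Both boot parameters are honoured only on architectures
 * that select CONFIG_GENERIC_IDLE_POLL_SETUP.
 */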
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

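/*
 * Busy-wait with interrupts enabled until a reschedule is needed or
 * the polling conditions go away. RCU is told we are idle so that
 * grace periods can progress while we spin.
 */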
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
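/*
 * Default idle method for architectures that do not provide their own:
 * there is no way to actually stop the CPU, so fall back to permanent
 * polling.
 */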
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	unsigned int broadcast;
	bool reflect;

	/*
	 * Check if the idle task must be rescheduled. If so, exit the
	 * function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the latencies of
	 * irqs-disabled critical sections.
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period
	 */
	rcu_idle_enter();

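	/*
	 * If the cpuidle driver or device is not ready yet, fall back
	 * to the default arch idle path below.
	 */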
	if (cpuidle_not_available(drv, dev))
		goto use_default;

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze()) {
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0) {
			local_irq_enable();
			goto exit_idle;
		}

		reflect = false;
		next_state = cpuidle_find_deepest_state(drv, dev);
	} else {
		reflect = true;
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
	}
	/* Fall back to the default arch idle method on errors. */
	if (next_state < 0)
		goto use_default;

	/*
	 * The idle task must be rescheduled, so it is pointless to
	 * enter idle; just record a zero residency and get out of
	 * this function.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}

	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;

	/*
	 * Tell the time framework to switch to a broadcast timer
	 * because our local timer will be shut down. If a local timer
	 * is used from another cpu as a broadcast timer, this call may
	 * fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter())
		goto use_default;

	/* Take note of the planned idle state. */
	idle_set_state(this_rq(), &drv->states[next_state]);

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	/* The cpu is no longer idle or about to enter idle. */
	idle_set_state(this_rq(), NULL);

	if (broadcast)
		tick_broadcast_exit();

	/*
	 * Give the governor an opportunity to reflect on the outcome.
	 */
	if (reflect)
		cpuidle_reflect(dev, entered_state);

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
	return;

use_default:
	/*
	 * We can't use the cpuidle framework, let's use the default
	 * idle routine.
	 */
	if (current_clr_polling_and_test())
		local_irq_enable();
	else
		arch_cpu_idle();

	goto exit_idle;
}

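/*
 * Set by a dying CPU just before it calls arch_cpu_idle_dead(), so the
 * CPU-hotplug code can tell that the outgoing CPU has finished all
 * activity.
 */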
DEFINE_PER_CPU(bool, cpu_dead_idle);

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

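			/*
			 * If this CPU has gone offline, tell RCU that it
			 * is dying for good and hand over to the arch
			 * "dead" idle path; on most architectures
			 * arch_cpu_idle_dead() never returns.
			 */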
			if (cpu_is_offline(smp_processor_id())) {
				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
					       (void *)(long)smp_processor_id());
				smp_mb(); /* all activity before dead. */
				this_cpu_write(cpu_dead_idle, true);
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we re-enable interrupts and spin.
			 *
			 * Also, if we detected in the wakeup-from-idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}

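/**
 * cpu_startup_entry - idle entry point for a booting or hotplugged CPU
 * @state: cpu hotplug state reached by the caller (not used here)
 *
 * Never returns: the CPU ends up running cpu_idle_loop() forever.
 */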
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}