// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}
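
/*
 * Illustrative sketch only: the cpuidle core is expected to bracket actual
 * state entry with calls like the ones below (names as in drivers/cpuidle),
 * so the scheduler always knows which state, if any, this CPU sits in:
 *
 *	sched_idle_set_state(target_state);
 *	... enter the hardware idle state ...
 *	sched_idle_set_state(NULL);
 */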

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
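
/*
 * A minimal usage sketch (illustrative, not taken from a particular caller):
 * code that temporarily cannot tolerate the wakeup latency of deep idle
 * states can force the polling loop around a critical window:
 *
 *	cpu_idle_poll_ctrl(true);
 *	... latency-sensitive window ...
 *	cpu_idle_poll_ctrl(false);
 *
 * The enable count nests, so independent callers may pair their own
 * enable/disable calls without coordinating with each other.
 */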

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
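
/*
 * Example (boot-time knob, illustrative command line): on architectures that
 * select CONFIG_GENERIC_IDLE_POLL_SETUP, booting with "nohlt", e.g.
 *
 *	console=ttyS0 nohlt
 *
 * makes every CPU spin in cpu_idle_poll() instead of entering low-power
 * states, while "hlt" selects the default (non-polling) behaviour.
 */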

static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}
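
/*
 * The weak arch_cpu_idle() above only falls back to polling.  A minimal
 * sketch of an architecture override (modelled loosely on the arm64
 * implementation, shown here for illustration only) executes a low-power
 * wait instruction and re-enables interrupts on wakeup:
 *
 *	void arch_cpu_idle(void)
 *	{
 *		cpu_do_idle();		// wfi or equivalent
 *		local_irq_enable();
 *	}
 */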

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be rescheduled, so it is pointless to go idle;
	 * just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set.  If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more RCU read-side critical sections and one more
	 * step to the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {
			rcu_idle_enter();

			entered_state = cpuidle_enter_s2idle(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}

			rcu_idle_exit();

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		rcu_idle_enter();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();

		/*
		 * In poll mode we re-enable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_from_idle();
	sched_ttwu_pending();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}
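
/*
 * Illustrative only: a diagnostic path that holds a struct pt_regs for a
 * remote CPU could use cpu_in_idle() to skip CPUs merely parked in the
 * idle loop, along the lines of:
 *
 *	if (regs && cpu_in_idle(instruction_pointer(regs)))
 *		pr_warn("cpu %d idling, backtrace skipped\n", cpu);
 */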

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
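
/*
 * A minimal usage sketch (assumptions: called from a per-CPU kernel thread
 * that has already been switched to SCHED_FIFO and pinned to a single CPU,
 * as the WARN_ON_ONCE() checks above require), injecting 10ms of forced
 * idle with no latency constraint:
 *
 *	play_idle_precise(10 * NSEC_PER_MSEC, U64_MAX);
 *
 * This is the pattern idle-injection users are expected to follow; passing
 * a smaller latency_ns restricts the injection to shallower idle states.
 */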

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};