
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/rt.h>
#include <linux/sched/clock.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/mm.h>
#include <linux/sched/cpufreq.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/kernel_stat.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x)	((void)(x))
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
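
/*
 * Worked example: with HZ == 1000 a jiffy is NSEC_PER_SEC / HZ ==
 * 1,000,000ns, so NS_TO_JIFFIES(2000000) == 2. The integer division
 * truncates, so any interval shorter than one jiffy converts to 0.
 */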

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
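
/*
 * Worked example (SCHED_FIXEDPOINT_SHIFT is 10): the user-visible nice-0
 * weight is sched_prio_to_weight[20] == 1024. On 64-bit:
 *
 *	scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD
 *
 * and scale_load_down() shifts the extra resolution back out before a
 * weight is ever reported to userspace.
 */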

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
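
/*
 * E.g. an entity whose absolute deadline is at t + 100us preempts one whose
 * deadline is at t + 200us: plain earliest-deadline-first. dl_time_before()
 * compares with wraparound-safe signed arithmetic, (s64)(a - b) < 0.
 */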

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - the dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It, in turn, can be changed by writing to that same control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
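
/*
 * Illustrative numbers for the check above (a sketch; the real values are
 * fixed-point fractions, not percentages): with a per-CPU limit dl_b->bw
 * of 20% on a 4-CPU root domain, a new task is admitted only while
 * total_bw - old_bw + new_bw stays below 4 * 20% == 80%, i.e. the admitted
 * -deadline tasks may together use up to "cpus" times the per-CPU limit.
 * dl_b->bw == -1 means the check is disabled.
 */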

extern void init_dl_bw(struct dl_bw *dl_b);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
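
/*
 * Illustrative call (a sketch; tg_unthrottle_up() stands in for a real
 * visitor, which would live in fair.c):
 *
 *	rcu_read_lock();
 *	walk_tg_tree(tg_nop, tg_unthrottle_up, (void *)rq);
 *	rcu_read_unlock();
 *
 * tg_nop() is the stock no-op visitor for whichever direction a caller
 * does not care about.
 */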

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
	unsigned long propagate_avg;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}
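
/*
 * Both this and dl_bandwidth_enabled() above key off the same knob:
 * /proc/sys/kernel/sched_rt_runtime_us. Writing -1 there makes
 * sysctl_sched_rt_runtime negative and turns both checks off.
 */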

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}
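
/*
 * arch_asym_cpu_priority() lets the architecture rank CPUs within an
 * SD_ASYM_PACKING domain; e.g. Intel Turbo Boost Max 3.0 reports higher
 * values for cores that can reach higher turbo frequencies, so
 * sched_asym_prefer(fast_cpu, slow_cpu) is true and load is packed onto
 * @a first. (The generic weak implementation simply favours
 * lower-numbered CPUs.)
 */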

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	unsigned long max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
extern cpumask_var_t fallback_doms;
extern cpumask_var_t sched_domains_tmpmask;

extern void init_defrootdomain(void);
extern int init_sched_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must
 * order lock acquire operations by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
	struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_update_flags;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02
#define RQCF_UPDATED	0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_update_flags |= RQCF_REQ_SKIP;
	else
		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}
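
/*
 * Illustrative use: a path that knows a reschedule is imminent, such as
 * yield, can call
 *
 *	rq_clock_skip_update(rq, true);
 *
 * to request that the clock update at the head of the coming __schedule()
 * be skipped; calling it with false cancels a pending request.
 */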

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
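
/*
 * Illustrative caller (a sketch mirroring the RT class, which queues its
 * push work from under the rq lock):
 *
 *	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
 *			       push_rt_tasks);
 *
 * The callback then runs from balance_callback(), at a point where it is
 * safe to drop and re-take rq->lock in order to touch other runqueues.
 */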

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}
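
/*
 * For instance, the sd_llc per-cpu pointer declared below is derived with
 * highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES): the highest domain in
 * which all CPUs still share a last-level cache.
 */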

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long capacity;
	unsigned long min_capacity; /* Min per-CPU capacity in group */
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;
	int asym_prefer_cpu;		/* cpu of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1081 | |
Peter Zijlstra | 391e43d | 2011-11-15 17:14:39 +0100 | [diff] [blame] | 1082 | #include "stats.h" |
Ingo Molnar | 1051408 | 2017-02-01 18:42:41 +0100 | [diff] [blame] | 1083 | #include "autogroup.h" |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1084 | |
| 1085 | #ifdef CONFIG_CGROUP_SCHED |
| 1086 | |
| 1087 | /* |
| 1088 | * Return the group to which this tasks belongs. |
| 1089 | * |
Tejun Heo | 8af01f5 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 1090 | * We cannot use task_css() and friends because the cgroup subsystem |
| 1091 | * changes that value before the cgroup_subsys::attach() method is called, |
| 1092 | * therefore we cannot pin it and might observe the wrong value. |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 1093 | * |
| 1094 | * The same is true for autogroup's p->signal->autogroup->tg, the autogroup |
| 1095 | * core changes this before calling sched_move_task(). |
| 1096 | * |
| 1097 | * Instead we use a 'copy' which is updated from sched_move_task() while |
| 1098 | * holding both task_struct::pi_lock and rq::lock. |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1099 | */ |
| 1100 | static inline struct task_group *task_group(struct task_struct *p) |
| 1101 | { |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 1102 | return p->sched_task_group; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1103 | } |
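/*
 * Illustrative sketch (not from this file): task_group(p) is only stable
 * while p->pi_lock or task_rq(p)->lock is held, because sched_move_task()
 * updates the copy under both locks:
 *
 *	rq = task_rq_lock(p, &rf);
 *	tg = task_group(p);		stable until task_rq_unlock()
 *	task_rq_unlock(rq, p, &rf);
 */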
| 1104 | |
| 1105 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ |
| 1106 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) |
| 1107 | { |
| 1108 | #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) |
| 1109 | struct task_group *tg = task_group(p); |
| 1110 | #endif |
| 1111 | |
| 1112 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Byungchul Park | ad936d8 | 2015-10-24 01:16:19 +0900 | [diff] [blame] | 1113 | set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1114 | p->se.cfs_rq = tg->cfs_rq[cpu]; |
| 1115 | p->se.parent = tg->se[cpu]; |
| 1116 | #endif |
| 1117 | |
| 1118 | #ifdef CONFIG_RT_GROUP_SCHED |
| 1119 | p->rt.rt_rq = tg->rt_rq[cpu]; |
| 1120 | p->rt.parent = tg->rt_se[cpu]; |
| 1121 | #endif |
| 1122 | } |
| 1123 | |
| 1124 | #else /* CONFIG_CGROUP_SCHED */ |
| 1125 | |
| 1126 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } |
| 1127 | static inline struct task_group *task_group(struct task_struct *p) |
| 1128 | { |
| 1129 | return NULL; |
| 1130 | } |
| 1131 | |
| 1132 | #endif /* CONFIG_CGROUP_SCHED */ |
| 1133 | |
| 1134 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1135 | { |
| 1136 | set_task_rq(p, cpu); |
| 1137 | #ifdef CONFIG_SMP |
| 1138 | /* |
| 1139 | * After ->cpu is set to a new value, task_rq_lock(p, ...) can be |
| 1140 | * successfully executed on another CPU. We must ensure that updates of |
| 1141 | * per-task data have been completed by this moment. |
| 1142 | */ |
| 1143 | smp_wmb(); |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1144 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1145 | p->cpu = cpu; |
| 1146 | #else |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1147 | task_thread_info(p)->cpu = cpu; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1148 | #endif |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1149 | p->wake_cpu = cpu; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1150 | #endif |
| 1151 | } |
| 1152 | |
| 1153 | /* |
| 1154 | * Tunables that become constants when CONFIG_SCHED_DEBUG is off: |
| 1155 | */ |
| 1156 | #ifdef CONFIG_SCHED_DEBUG |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1157 | # include <linux/static_key.h> |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1158 | # define const_debug __read_mostly |
| 1159 | #else |
| 1160 | # define const_debug const |
| 1161 | #endif |
| 1162 | |
| 1163 | extern const_debug unsigned int sysctl_sched_features; |
| 1164 | |
| 1165 | #define SCHED_FEAT(name, enabled) \ |
| 1166 | __SCHED_FEAT_##name , |
| 1167 | |
| 1168 | enum { |
Peter Zijlstra | 391e43d | 2011-11-15 17:14:39 +0100 | [diff] [blame] | 1169 | #include "features.h" |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1170 | __SCHED_FEAT_NR, |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1171 | }; |
| 1172 | |
| 1173 | #undef SCHED_FEAT |
| 1174 | |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1175 | #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1176 | #define SCHED_FEAT(name, enabled) \ |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1177 | static __always_inline bool static_branch_##name(struct static_key *key) \ |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1178 | { \ |
Jason Baron | 6e76ea8 | 2014-07-02 15:52:41 +0000 | [diff] [blame] | 1179 | return static_key_##enabled(key); \ |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1180 | } |
| 1181 | |
| 1182 | #include "features.h" |
| 1183 | |
| 1184 | #undef SCHED_FEAT |
| 1185 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1186 | extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1187 | #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) |
| 1188 | #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1189 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
Peter Zijlstra | f8b6d1c | 2011-07-06 14:20:14 +0200 | [diff] [blame] | 1190 | #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ |
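/*
 * Illustrative sketch: given an entry like SCHED_FEAT(HRTICK, false) in
 * features.h, the machinery above generates __SCHED_FEAT_HRTICK in the enum
 * and lets callers test the feature cheaply:
 *
 *	if (sched_feat(HRTICK))
 *		...take the high resolution tick path...
 *
 * With SCHED_DEBUG && HAVE_JUMP_LABEL this compiles to a static branch,
 * otherwise to a bit test against sysctl_sched_features.
 */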
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1191 | |
Srikar Dronamraju | 2a59572 | 2015-08-11 21:54:21 +0530 | [diff] [blame] | 1192 | extern struct static_key_false sched_numa_balancing; |
Mel Gorman | cb25176 | 2016-02-05 09:08:36 +0000 | [diff] [blame] | 1193 | extern struct static_key_false sched_schedstats; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1194 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1195 | static inline u64 global_rt_period(void) |
| 1196 | { |
| 1197 | return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
| 1198 | } |
| 1199 | |
| 1200 | static inline u64 global_rt_runtime(void) |
| 1201 | { |
| 1202 | if (sysctl_sched_rt_runtime < 0) |
| 1203 | return RUNTIME_INF; |
| 1204 | |
| 1205 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
| 1206 | } |
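/*
 * Worked example with the default sysctls (for illustration):
 * sysctl_sched_rt_period = 1000000 us and sysctl_sched_rt_runtime =
 * 950000 us, so global_rt_period() = 10^9 ns and global_rt_runtime() =
 * 9.5 * 10^8 ns, i.e. RT tasks may consume at most 95% of each 1s period.
 */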
| 1207 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1208 | static inline int task_current(struct rq *rq, struct task_struct *p) |
| 1209 | { |
| 1210 | return rq->curr == p; |
| 1211 | } |
| 1212 | |
| 1213 | static inline int task_running(struct rq *rq, struct task_struct *p) |
| 1214 | { |
| 1215 | #ifdef CONFIG_SMP |
| 1216 | return p->on_cpu; |
| 1217 | #else |
| 1218 | return task_current(rq, p); |
| 1219 | #endif |
| 1220 | } |
| 1221 | |
Kirill Tkhai | da0c1e6 | 2014-08-20 13:47:32 +0400 | [diff] [blame] | 1222 | static inline int task_on_rq_queued(struct task_struct *p) |
| 1223 | { |
| 1224 | return p->on_rq == TASK_ON_RQ_QUEUED; |
| 1225 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1226 | |
Kirill Tkhai | cca26e8 | 2014-08-20 13:47:42 +0400 | [diff] [blame] | 1227 | static inline int task_on_rq_migrating(struct task_struct *p) |
| 1228 | { |
| 1229 | return p->on_rq == TASK_ON_RQ_MIGRATING; |
| 1230 | } |
| 1231 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1232 | #ifndef prepare_arch_switch |
| 1233 | # define prepare_arch_switch(next) do { } while (0) |
| 1234 | #endif |
Catalin Marinas | 01f23e1 | 2011-11-27 21:43:10 +0000 | [diff] [blame] | 1235 | #ifndef finish_arch_post_lock_switch |
| 1236 | # define finish_arch_post_lock_switch() do { } while (0) |
| 1237 | #endif |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1238 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1239 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
| 1240 | { |
| 1241 | #ifdef CONFIG_SMP |
| 1242 | /* |
| 1243 | * We can optimise this out completely for !SMP, because the |
| 1244 | * SMP rebalancing from interrupt is the only thing that cares |
| 1245 | * here. |
| 1246 | */ |
| 1247 | next->on_cpu = 1; |
| 1248 | #endif |
| 1249 | } |
| 1250 | |
| 1251 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
| 1252 | { |
| 1253 | #ifdef CONFIG_SMP |
| 1254 | /* |
| 1255 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
| 1256 | * We must ensure this doesn't happen until the switch is completely |
| 1257 | * finished. |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1258 | * |
Peter Zijlstra | b75a225 | 2015-10-06 14:36:17 +0200 | [diff] [blame] | 1259 | * In particular, the load of prev->state in finish_task_switch() must |
| 1260 | * happen before this. |
| 1261 | * |
Peter Zijlstra | 1f03e8d | 2016-04-04 10:57:12 +0200 | [diff] [blame] | 1262 | * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1263 | */ |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1264 | smp_store_release(&prev->on_cpu, 0); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1265 | #endif |
| 1266 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 1267 | /* this is a valid case when another task releases the spinlock */ |
| 1268 | rq->lock.owner = current; |
| 1269 | #endif |
| 1270 | /* |
| 1271 | * If we are tracking spinlock dependencies then we have to |
| 1272 | * fix up the runqueue lock - which gets 'carried over' from |
| 1273 | * prev into current: |
| 1274 | */ |
| 1275 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
| 1276 | |
| 1277 | raw_spin_unlock_irq(&rq->lock); |
| 1278 | } |
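/*
 * Illustrative sketch of the pairing mentioned above: try_to_wake_up()
 * waits with
 *
 *	smp_cond_load_acquire(&p->on_cpu, !VAL);
 *
 * so the smp_store_release() in finish_lock_switch() guarantees the old
 * context switch is fully complete before the task can be woken up on
 * another CPU.
 */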
| 1279 | |
Li Zefan | b13095f | 2013-03-05 16:06:38 +0800 | [diff] [blame] | 1280 | /* |
| 1281 | * wake flags |
| 1282 | */ |
| 1283 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ |
| 1284 | #define WF_FORK 0x02 /* child wakeup after fork */ |
| 1285 | #define WF_MIGRATED 0x4 /* internal use, task got migrated */ |
| 1286 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1287 | /* |
| 1288 | * To avoid subverting "niceness" when tasks with unusual "nice" values are |
| 1289 | * distributed unevenly across CPUs, the contribution each task makes to its |
| 1290 | * run queue's load is weighted according to its scheduling class and "nice" |
| 1291 | * value. For SCHED_NORMAL tasks this is simply a scaled version of the time |
| 1292 | * slice allocation they receive on time slice expiry etc. |
| 1294 | */ |
| 1295 | |
| 1296 | #define WEIGHT_IDLEPRIO 3 |
| 1297 | #define WMULT_IDLEPRIO 1431655765 |
| 1298 | |
Andi Kleen | ed82b8a | 2015-11-29 20:59:43 -0800 | [diff] [blame] | 1299 | extern const int sched_prio_to_weight[40]; |
| 1300 | extern const u32 sched_prio_to_wmult[40]; |
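/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * tables above are indexed by nice level; nice 0 (static_prio 120) maps to
 * index 20 and weight 1024, nice -20 to 88761, nice 19 to 15, with
 * adjacent levels ~1.25x apart.
 */
static inline unsigned long example_task_load_weight(struct task_struct *p)
{
	return sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
}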
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1301 | |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1302 | /* |
| 1303 | * {de,en}queue flags: |
| 1304 | * |
| 1305 | * DEQUEUE_SLEEP - task is no longer runnable |
| 1306 | * ENQUEUE_WAKEUP - task just became runnable |
| 1307 | * |
| 1308 | * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks |
| 1309 | * are in a known state which allows modification. Such pairs |
| 1310 | * should preserve as much state as possible. |
| 1311 | * |
| 1312 | * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location |
| 1313 | * in the runqueue. |
| 1314 | * |
| 1315 | * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) |
| 1316 | * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1317 | * ENQUEUE_MIGRATED - the task was migrated during wakeup |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1318 | * |
| 1319 | */ |
| 1320 | |
| 1321 | #define DEQUEUE_SLEEP 0x01 |
| 1322 | #define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ |
| 1323 | #define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */ |
| 1324 | |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1325 | #define ENQUEUE_WAKEUP 0x01 |
Peter Zijlstra | ff77e46 | 2016-01-18 15:27:07 +0100 | [diff] [blame] | 1326 | #define ENQUEUE_RESTORE 0x02 |
| 1327 | #define ENQUEUE_MOVE 0x04 |
| 1328 | |
| 1329 | #define ENQUEUE_HEAD 0x08 |
| 1330 | #define ENQUEUE_REPLENISH 0x10 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1331 | #ifdef CONFIG_SMP |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1332 | #define ENQUEUE_MIGRATED 0x20 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1333 | #else |
Peter Zijlstra | 59efa0b | 2016-05-10 18:24:37 +0200 | [diff] [blame] | 1334 | #define ENQUEUE_MIGRATED 0x00 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1335 | #endif |
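/*
 * Illustrative sketch: a SAVE/RESTORE pair as used by e.g.
 * __sched_setscheduler() to change a queued task's parameters in place:
 *
 *	dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE);
 *	...change p's scheduling parameters...
 *	enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_MOVE);
 */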
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1336 | |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1337 | #define RETRY_TASK ((void *)-1UL) |
| 1338 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1339 | struct sched_class { |
| 1340 | const struct sched_class *next; |
| 1341 | |
| 1342 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1343 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1344 | void (*yield_task) (struct rq *rq); |
| 1345 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); |
| 1346 | |
| 1347 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
| 1348 | |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1349 | /* |
| 1350 | * The pick_next_task() method is responsible for calling put_prev_task() |
| 1351 | * on @prev (or something equivalent) before returning the next task. |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1353 | * |
| 1354 | * May return RETRY_TASK when it finds a higher prio class has runnable |
| 1355 | * tasks. |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1356 | */ |
| 1357 | struct task_struct * (*pick_next_task) (struct rq *rq, |
Peter Zijlstra | e7904a2 | 2015-08-01 19:25:08 +0200 | [diff] [blame] | 1358 | struct task_struct *prev, |
Matt Fleming | d8ac897 | 2016-09-21 14:38:10 +0100 | [diff] [blame] | 1359 | struct rq_flags *rf); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1360 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
| 1361 | |
| 1362 | #ifdef CONFIG_SMP |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1363 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
xiaofeng.yan | 5a4fd03 | 2015-09-23 14:55:59 +0800 | [diff] [blame] | 1364 | void (*migrate_task_rq)(struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1365 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1366 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
| 1367 | |
| 1368 | void (*set_cpus_allowed)(struct task_struct *p, |
| 1369 | const struct cpumask *newmask); |
| 1370 | |
| 1371 | void (*rq_online)(struct rq *rq); |
| 1372 | void (*rq_offline)(struct rq *rq); |
| 1373 | #endif |
| 1374 | |
| 1375 | void (*set_curr_task) (struct rq *rq); |
| 1376 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
| 1377 | void (*task_fork) (struct task_struct *p); |
Dario Faggioli | e6c390f | 2013-11-07 14:43:35 +0100 | [diff] [blame] | 1378 | void (*task_dead) (struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1379 | |
Kirill Tkhai | 67dfa1b | 2014-10-27 17:40:52 +0300 | [diff] [blame] | 1380 | /* |
| 1381 | * The switched_from() call is allowed to drop rq->lock, therefore we |
| 1382 | * cannot assume the switched_from/switched_to pair is serialized by |
| 1383 | * rq->lock. They are however serialized by p->pi_lock. |
| 1384 | */ |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1385 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); |
| 1386 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
| 1387 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
| 1388 | int oldprio); |
| 1389 | |
| 1390 | unsigned int (*get_rr_interval) (struct rq *rq, |
| 1391 | struct task_struct *task); |
| 1392 | |
Stanislaw Gruszka | 6e99891 | 2014-11-12 16:58:44 +0100 | [diff] [blame] | 1393 | void (*update_curr) (struct rq *rq); |
| 1394 | |
Vincent Guittot | ea86cb4 | 2016-06-17 13:38:55 +0200 | [diff] [blame] | 1395 | #define TASK_SET_GROUP 0 |
| 1396 | #define TASK_MOVE_GROUP 1 |
| 1397 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1398 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Vincent Guittot | ea86cb4 | 2016-06-17 13:38:55 +0200 | [diff] [blame] | 1399 | void (*task_change_group) (struct task_struct *p, int type); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1400 | #endif |
| 1401 | }; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1402 | |
Peter Zijlstra | 3f1d2a3 | 2014-02-12 10:49:30 +0100 | [diff] [blame] | 1403 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
| 1404 | { |
| 1405 | prev->sched_class->put_prev_task(rq, prev); |
| 1406 | } |
| 1407 | |
Peter Zijlstra | b2bf6c3 | 2016-09-20 22:00:38 +0200 | [diff] [blame] | 1408 | static inline void set_curr_task(struct rq *rq, struct task_struct *curr) |
| 1409 | { |
| 1410 | curr->sched_class->set_curr_task(rq); |
| 1411 | } |
| 1412 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1413 | #define sched_class_highest (&stop_sched_class) |
| 1414 | #define for_each_class(class) \ |
| 1415 | for (class = sched_class_highest; class; class = class->next) |
| 1416 | |
| 1417 | extern const struct sched_class stop_sched_class; |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1418 | extern const struct sched_class dl_sched_class; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1419 | extern const struct sched_class rt_sched_class; |
| 1420 | extern const struct sched_class fair_sched_class; |
| 1421 | extern const struct sched_class idle_sched_class; |
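/*
 * Illustrative sketch: pick_next_task() in core.c walks the classes in
 * priority order (stop -> dl -> rt -> fair -> idle) roughly like:
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, rf);
 *		if (p == RETRY_TASK)
 *			goto again;
 *		if (p)
 *			return p;
 *	}
 */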
| 1422 | |
| 1423 | |
| 1424 | #ifdef CONFIG_SMP |
| 1425 | |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1426 | extern void update_group_capacity(struct sched_domain *sd, int cpu); |
Li Zefan | b719203 | 2013-03-07 10:00:26 +0800 | [diff] [blame] | 1427 | |
Daniel Lezcano | 7caff66 | 2014-01-06 12:34:38 +0100 | [diff] [blame] | 1428 | extern void trigger_load_balance(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1429 | |
Peter Zijlstra | c5b2803 | 2015-05-15 17:43:35 +0200 | [diff] [blame] | 1430 | extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); |
| 1431 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1432 | #endif |
| 1433 | |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1434 | #ifdef CONFIG_CPU_IDLE |
| 1435 | static inline void idle_set_state(struct rq *rq, |
| 1436 | struct cpuidle_state *idle_state) |
| 1437 | { |
| 1438 | rq->idle_state = idle_state; |
| 1439 | } |
| 1440 | |
| 1441 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1442 | { |
Peter Zijlstra | 9148a3a | 2016-09-20 22:34:51 +0200 | [diff] [blame] | 1443 | SCHED_WARN_ON(!rcu_read_lock_held()); |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1444 | return rq->idle_state; |
| 1445 | } |
| 1446 | #else |
| 1447 | static inline void idle_set_state(struct rq *rq, |
| 1448 | struct cpuidle_state *idle_state) |
| 1449 | { |
| 1450 | } |
| 1451 | |
| 1452 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1453 | { |
| 1454 | return NULL; |
| 1455 | } |
| 1456 | #endif |
| 1457 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1458 | extern void sysrq_sched_debug_show(void); |
| 1459 | extern void sched_init_granularity(void); |
| 1460 | extern void update_max_interval(void); |
Juri Lelli | 1baca4c | 2013-11-07 14:43:38 +0100 | [diff] [blame] | 1461 | |
| 1462 | extern void init_sched_dl_class(void); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1463 | extern void init_sched_rt_class(void); |
| 1464 | extern void init_sched_fair_class(void); |
| 1465 | |
Kirill Tkhai | 8875125 | 2014-06-29 00:03:57 +0400 | [diff] [blame] | 1466 | extern void resched_curr(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1467 | extern void resched_cpu(int cpu); |
| 1468 | |
| 1469 | extern struct rt_bandwidth def_rt_bandwidth; |
| 1470 | extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); |
| 1471 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1472 | extern struct dl_bandwidth def_dl_bandwidth; |
| 1473 | extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1474 | extern void init_dl_task_timer(struct sched_dl_entity *dl_se); |
| 1475 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1476 | unsigned long to_ratio(u64 period, u64 runtime); |
| 1477 | |
Yuyang Du | 540247f | 2015-07-15 08:04:39 +0800 | [diff] [blame] | 1478 | extern void init_entity_runnable_average(struct sched_entity *se); |
Yuyang Du | 2b8c41d | 2016-03-30 04:30:56 +0800 | [diff] [blame] | 1479 | extern void post_init_entity_util_avg(struct sched_entity *se); |
Alex Shi | a75cdaa | 2013-06-20 10:18:47 +0800 | [diff] [blame] | 1480 | |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1481 | #ifdef CONFIG_NO_HZ_FULL |
| 1482 | extern bool sched_can_stop_tick(struct rq *rq); |
| 1483 | |
| 1484 | /* |
| 1485 | * The tick may be needed by tasks in the runqueue depending on their policy |
| 1486 | * and requirements. If the tick is needed, let's send the target CPU an IPI |
| 1487 | * to kick it out of nohz mode if necessary. |
| 1488 | */ |
| 1489 | static inline void sched_update_tick_dependency(struct rq *rq) |
| 1490 | { |
| 1491 | int cpu; |
| 1492 | |
| 1493 | if (!tick_nohz_full_enabled()) |
| 1494 | return; |
| 1495 | |
| 1496 | cpu = cpu_of(rq); |
| 1497 | |
| 1498 | if (!tick_nohz_full_cpu(cpu)) |
| 1499 | return; |
| 1500 | |
| 1501 | if (sched_can_stop_tick(rq)) |
| 1502 | tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); |
| 1503 | else |
| 1504 | tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); |
| 1505 | } |
| 1506 | #else |
| 1507 | static inline void sched_update_tick_dependency(struct rq *rq) { } |
| 1508 | #endif |
| 1509 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1510 | static inline void add_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1511 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1512 | unsigned prev_nr = rq->nr_running; |
| 1513 | |
| 1514 | rq->nr_running = prev_nr + count; |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1515 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1516 | if (prev_nr < 2 && rq->nr_running >= 2) { |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1517 | #ifdef CONFIG_SMP |
| 1518 | if (!rq->rd->overload) |
| 1519 | rq->rd->overload = true; |
| 1520 | #endif |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1521 | } |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1522 | |
| 1523 | sched_update_tick_dependency(rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1524 | } |
| 1525 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1526 | static inline void sub_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1527 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1528 | rq->nr_running -= count; |
Frederic Weisbecker | 76d92ac | 2015-07-17 22:25:49 +0200 | [diff] [blame] | 1529 | /* Check if we still need the tick (e.g. for preemption) */ |
| 1530 | sched_update_tick_dependency(rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1531 | } |
| 1532 | |
Frederic Weisbecker | 265f22a | 2013-05-03 03:39:05 +0200 | [diff] [blame] | 1533 | static inline void rq_last_tick_reset(struct rq *rq) |
| 1534 | { |
| 1535 | #ifdef CONFIG_NO_HZ_FULL |
| 1536 | rq->last_sched_tick = jiffies; |
| 1537 | #endif |
| 1538 | } |
| 1539 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1540 | extern void update_rq_clock(struct rq *rq); |
| 1541 | |
| 1542 | extern void activate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1543 | extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1544 | |
| 1545 | extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); |
| 1546 | |
| 1547 | extern const_debug unsigned int sysctl_sched_time_avg; |
| 1548 | extern const_debug unsigned int sysctl_sched_nr_migrate; |
| 1549 | extern const_debug unsigned int sysctl_sched_migration_cost; |
| 1550 | |
| 1551 | static inline u64 sched_avg_period(void) |
| 1552 | { |
| 1553 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1554 | } |
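/*
 * Worked example with the default sysctl_sched_time_avg of 1000 ms (for
 * illustration): sched_avg_period() = 1000 * 10^6 / 2 = 5 * 10^8 ns, i.e.
 * the rt_avg averaging window is half a second.
 */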
| 1555 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1556 | #ifdef CONFIG_SCHED_HRTICK |
| 1557 | |
| 1558 | /* |
| 1559 | * Use hrtick when: |
| 1560 | * - enabled by features |
| 1561 | * - hrtimer is actually high res |
| 1562 | */ |
| 1563 | static inline int hrtick_enabled(struct rq *rq) |
| 1564 | { |
| 1565 | if (!sched_feat(HRTICK)) |
| 1566 | return 0; |
| 1567 | if (!cpu_active(cpu_of(rq))) |
| 1568 | return 0; |
| 1569 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1570 | } |
| 1571 | |
| 1572 | void hrtick_start(struct rq *rq, u64 delay); |
| 1573 | |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 1574 | #else |
| 1575 | |
| 1576 | static inline int hrtick_enabled(struct rq *rq) |
| 1577 | { |
| 1578 | return 0; |
| 1579 | } |
| 1580 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1581 | #endif /* CONFIG_SCHED_HRTICK */ |
| 1582 | |
| 1583 | #ifdef CONFIG_SMP |
| 1584 | extern void sched_avg_update(struct rq *rq); |
Peter Zijlstra | dfbca41 | 2015-03-23 14:19:05 +0100 | [diff] [blame] | 1585 | |
| 1586 | #ifndef arch_scale_freq_capacity |
| 1587 | static __always_inline |
| 1588 | unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu) |
| 1589 | { |
| 1590 | return SCHED_CAPACITY_SCALE; |
| 1591 | } |
| 1592 | #endif |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1593 | |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1594 | #ifndef arch_scale_cpu_capacity |
| 1595 | static __always_inline |
| 1596 | unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) |
| 1597 | { |
Dietmar Eggemann | e3279a2 | 2015-08-15 00:04:41 +0100 | [diff] [blame] | 1598 | if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1)) |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1599 | return sd->smt_gain / sd->span_weight; |
| 1600 | |
| 1601 | return SCHED_CAPACITY_SCALE; |
| 1602 | } |
| 1603 | #endif |
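/*
 * Worked example (illustration, assuming the default smt_gain of 1178):
 * on a 2-way SMT core SD_SHARE_CPUCAPACITY is set and span_weight == 2,
 * so each sibling reports 1178 / 2 = 589 out of SCHED_CAPACITY_SCALE
 * (1024), reflecting that two hardware threads share one core.
 */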
| 1604 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1605 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1606 | { |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1607 | rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1608 | sched_avg_update(rq); |
| 1609 | } |
| 1610 | #else |
| 1611 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } |
| 1612 | static inline void sched_avg_update(struct rq *rq) { } |
| 1613 | #endif |
| 1614 | |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1615 | struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3e71a46 | 2016-04-28 16:16:33 +0200 | [diff] [blame] | 1616 | __acquires(rq->lock); |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1617 | struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1618 | __acquires(p->pi_lock) |
Peter Zijlstra | 3e71a46 | 2016-04-28 16:16:33 +0200 | [diff] [blame] | 1619 | __acquires(rq->lock); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1620 | |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1621 | static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1622 | __releases(rq->lock) |
| 1623 | { |
Matt Fleming | d8ac897 | 2016-09-21 14:38:10 +0100 | [diff] [blame] | 1624 | rq_unpin_lock(rq, rf); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1625 | raw_spin_unlock(&rq->lock); |
| 1626 | } |
| 1627 | |
| 1628 | static inline void |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1629 | task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1630 | __releases(rq->lock) |
| 1631 | __releases(p->pi_lock) |
| 1632 | { |
Matt Fleming | d8ac897 | 2016-09-21 14:38:10 +0100 | [diff] [blame] | 1633 | rq_unpin_lock(rq, rf); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1634 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | eb58075 | 2015-07-31 21:28:18 +0200 | [diff] [blame] | 1635 | raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1636 | } |
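/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * canonical use of the lock/unlock pair above. While both p->pi_lock and
 * rq->lock are held, p cannot be migrated or change its on_rq state.
 */
static inline bool example_task_queued_stable(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	bool queued;

	rq = task_rq_lock(p, &rf);
	queued = task_on_rq_queued(p);	/* stable under both locks */
	task_rq_unlock(rq, p, &rf);

	return queued;
}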
| 1637 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1638 | #ifdef CONFIG_SMP |
| 1639 | #ifdef CONFIG_PREEMPT |
| 1640 | |
| 1641 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 1642 | |
| 1643 | /* |
| 1644 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 1645 | * way at the expense of forcing extra atomic operations in all |
| 1646 | * invocations. This assures that the double_lock is acquired using the |
| 1647 | * same underlying policy as the spinlock_t on this architecture, which |
| 1648 | * reduces latency compared to the unfair variant below. However, it |
| 1649 | * also adds more overhead and therefore may reduce throughput. |
| 1650 | */ |
| 1651 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1652 | __releases(this_rq->lock) |
| 1653 | __acquires(busiest->lock) |
| 1654 | __acquires(this_rq->lock) |
| 1655 | { |
| 1656 | raw_spin_unlock(&this_rq->lock); |
| 1657 | double_rq_lock(this_rq, busiest); |
| 1658 | |
| 1659 | return 1; |
| 1660 | } |
| 1661 | |
| 1662 | #else |
| 1663 | /* |
| 1664 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 1665 | * latency by eliminating extra atomic operations when the locks are |
| 1666 | * already in proper order on entry. This favors lower cpu-ids and will |
| 1667 | * grant the double lock to lower cpus over higher ids under contention, |
| 1668 | * regardless of entry order into the function. |
| 1669 | */ |
| 1670 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1671 | __releases(this_rq->lock) |
| 1672 | __acquires(busiest->lock) |
| 1673 | __acquires(this_rq->lock) |
| 1674 | { |
| 1675 | int ret = 0; |
| 1676 | |
| 1677 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
| 1678 | if (busiest < this_rq) { |
| 1679 | raw_spin_unlock(&this_rq->lock); |
| 1680 | raw_spin_lock(&busiest->lock); |
| 1681 | raw_spin_lock_nested(&this_rq->lock, |
| 1682 | SINGLE_DEPTH_NESTING); |
| 1683 | ret = 1; |
| 1684 | } else |
| 1685 | raw_spin_lock_nested(&busiest->lock, |
| 1686 | SINGLE_DEPTH_NESTING); |
| 1687 | } |
| 1688 | return ret; |
| 1689 | } |
| 1690 | |
| 1691 | #endif /* CONFIG_PREEMPT */ |
| 1692 | |
| 1693 | /* |
| 1694 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 1695 | */ |
| 1696 | static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1697 | { |
| 1698 | if (unlikely(!irqs_disabled())) { |
| 1699 | /* printk() doesn't work well under rq->lock */ |
| 1700 | raw_spin_unlock(&this_rq->lock); |
| 1701 | BUG_ON(1); |
| 1702 | } |
| 1703 | |
| 1704 | return _double_lock_balance(this_rq, busiest); |
| 1705 | } |
| 1706 | |
| 1707 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 1708 | __releases(busiest->lock) |
| 1709 | { |
| 1710 | raw_spin_unlock(&busiest->lock); |
| 1711 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 1712 | } |
| 1713 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1714 | static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
| 1715 | { |
| 1716 | if (l1 > l2) |
| 1717 | swap(l1, l2); |
| 1718 | |
| 1719 | spin_lock(l1); |
| 1720 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1721 | } |
| 1722 | |
Mike Galbraith | 60e69ee | 2014-04-07 10:55:15 +0200 | [diff] [blame] | 1723 | static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) |
| 1724 | { |
| 1725 | if (l1 > l2) |
| 1726 | swap(l1, l2); |
| 1727 | |
| 1728 | spin_lock_irq(l1); |
| 1729 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1730 | } |
| 1731 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1732 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
| 1733 | { |
| 1734 | if (l1 > l2) |
| 1735 | swap(l1, l2); |
| 1736 | |
| 1737 | raw_spin_lock(l1); |
| 1738 | raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1739 | } |
| 1740 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1741 | /* |
| 1742 | * double_rq_lock - safely lock two runqueues |
| 1743 | * |
| 1744 | * Note this does not disable interrupts like task_rq_lock, |
| 1745 | * you need to do so manually before calling. |
| 1746 | */ |
| 1747 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1748 | __acquires(rq1->lock) |
| 1749 | __acquires(rq2->lock) |
| 1750 | { |
| 1751 | BUG_ON(!irqs_disabled()); |
| 1752 | if (rq1 == rq2) { |
| 1753 | raw_spin_lock(&rq1->lock); |
| 1754 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1755 | } else { |
| 1756 | if (rq1 < rq2) { |
| 1757 | raw_spin_lock(&rq1->lock); |
| 1758 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 1759 | } else { |
| 1760 | raw_spin_lock(&rq2->lock); |
| 1761 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 1762 | } |
| 1763 | } |
| 1764 | } |
| 1765 | |
| 1766 | /* |
| 1767 | * double_rq_unlock - safely unlock two runqueues |
| 1768 | * |
| 1769 | * Note this does not restore interrupts like task_rq_unlock, |
| 1770 | * you need to do so manually after calling. |
| 1771 | */ |
| 1772 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1773 | __releases(rq1->lock) |
| 1774 | __releases(rq2->lock) |
| 1775 | { |
| 1776 | raw_spin_unlock(&rq1->lock); |
| 1777 | if (rq1 != rq2) |
| 1778 | raw_spin_unlock(&rq2->lock); |
| 1779 | else |
| 1780 | __release(rq2->lock); |
| 1781 | } |
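/*
 * Illustrative sketch: callers disable interrupts themselves around the
 * pair above, e.g. when moving tasks between two runqueues:
 *
 *	local_irq_disable();
 *	double_rq_lock(src_rq, dst_rq);
 *	...move tasks from src_rq to dst_rq...
 *	double_rq_unlock(src_rq, dst_rq);
 *	local_irq_enable();
 */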
| 1782 | |
Ingo Molnar | f2cb136 | 2017-02-01 13:10:18 +0100 | [diff] [blame] | 1783 | extern void set_rq_online (struct rq *rq); |
| 1784 | extern void set_rq_offline(struct rq *rq); |
| 1785 | extern bool sched_smp_initialized; |
| 1786 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1787 | #else /* CONFIG_SMP */ |
| 1788 | |
| 1789 | /* |
| 1790 | * double_rq_lock - safely lock two runqueues |
| 1791 | * |
| 1792 | * Note this does not disable interrupts like task_rq_lock, |
| 1793 | * you need to do so manually before calling. |
| 1794 | */ |
| 1795 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1796 | __acquires(rq1->lock) |
| 1797 | __acquires(rq2->lock) |
| 1798 | { |
| 1799 | BUG_ON(!irqs_disabled()); |
| 1800 | BUG_ON(rq1 != rq2); |
| 1801 | raw_spin_lock(&rq1->lock); |
| 1802 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1803 | } |
| 1804 | |
| 1805 | /* |
| 1806 | * double_rq_unlock - safely unlock two runqueues |
| 1807 | * |
| 1808 | * Note this does not restore interrupts like task_rq_unlock, |
| 1809 | * you need to do so manually after calling. |
| 1810 | */ |
| 1811 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1812 | __releases(rq1->lock) |
| 1813 | __releases(rq2->lock) |
| 1814 | { |
| 1815 | BUG_ON(rq1 != rq2); |
| 1816 | raw_spin_unlock(&rq1->lock); |
| 1817 | __release(rq2->lock); |
| 1818 | } |
| 1819 | |
| 1820 | #endif |
| 1821 | |
| 1822 | extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); |
| 1823 | extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 1824 | |
| 1825 | #ifdef CONFIG_SCHED_DEBUG |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1826 | extern void print_cfs_stats(struct seq_file *m, int cpu); |
| 1827 | extern void print_rt_stats(struct seq_file *m, int cpu); |
Wanpeng Li | acb3213 | 2014-10-31 06:39:33 +0800 | [diff] [blame] | 1828 | extern void print_dl_stats(struct seq_file *m, int cpu); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 1829 | extern void |
| 1830 | print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 397f237 | 2015-06-25 22:51:43 +0530 | [diff] [blame] | 1831 | |
| 1832 | #ifdef CONFIG_NUMA_BALANCING |
| 1833 | extern void |
| 1834 | show_numa_stats(struct task_struct *p, struct seq_file *m); |
| 1835 | extern void |
| 1836 | print_numa_stats(struct seq_file *m, int node, unsigned long tsf, |
| 1837 | unsigned long tpf, unsigned long gsf, unsigned long gpf); |
| 1838 | #endif /* CONFIG_NUMA_BALANCING */ |
| 1839 | #endif /* CONFIG_SCHED_DEBUG */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1840 | |
| 1841 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
Abel Vesa | 07c54f7 | 2015-03-03 13:50:27 +0200 | [diff] [blame] | 1842 | extern void init_rt_rq(struct rt_rq *rt_rq); |
| 1843 | extern void init_dl_rq(struct dl_rq *dl_rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1844 | |
Ben Segall | 1ee14e6 | 2013-10-16 11:16:12 -0700 | [diff] [blame] | 1845 | extern void cfs_bandwidth_usage_inc(void); |
| 1846 | extern void cfs_bandwidth_usage_dec(void); |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 1847 | |
Frederic Weisbecker | 3451d02 | 2011-08-10 23:21:01 +0200 | [diff] [blame] | 1848 | #ifdef CONFIG_NO_HZ_COMMON |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 1849 | enum rq_nohz_flag_bits { |
| 1850 | NOHZ_TICK_STOPPED, |
| 1851 | NOHZ_BALANCE_KICK, |
| 1852 | }; |
| 1853 | |
| 1854 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
Thomas Gleixner | 20a5c8c | 2016-03-10 12:54:20 +0100 | [diff] [blame] | 1855 | |
| 1856 | extern void nohz_balance_exit_idle(unsigned int cpu); |
| 1857 | #else |
| 1858 | static inline void nohz_balance_exit_idle(unsigned int cpu) { } |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 1859 | #endif |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1860 | |
| 1861 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1862 | struct irqtime { |
Frederic Weisbecker | a499a5a | 2017-01-31 04:09:32 +0100 | [diff] [blame] | 1863 | u64 tick_delta; |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1864 | u64 irq_start_time; |
| 1865 | struct u64_stats_sync sync; |
| 1866 | }; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1867 | |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1868 | DECLARE_PER_CPU(struct irqtime, cpu_irqtime); |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1869 | |
| 1870 | static inline u64 irq_time_read(int cpu) |
| 1871 | { |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1872 | struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); |
Frederic Weisbecker | a499a5a | 2017-01-31 04:09:32 +0100 | [diff] [blame] | 1873 | u64 *cpustat = kcpustat_cpu(cpu).cpustat; |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1874 | unsigned int seq; |
| 1875 | u64 total; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1876 | |
| 1877 | do { |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1878 | seq = __u64_stats_fetch_begin(&irqtime->sync); |
Frederic Weisbecker | a499a5a | 2017-01-31 04:09:32 +0100 | [diff] [blame] | 1879 | total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ]; |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1880 | } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1881 | |
Frederic Weisbecker | 19d23dbf | 2016-09-26 02:29:20 +0200 | [diff] [blame] | 1882 | return total; |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1883 | } |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1884 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 1885 | |
| 1886 | #ifdef CONFIG_CPU_FREQ |
| 1887 | DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); |
| 1888 | |
| 1889 | /** |
| 1890 | * cpufreq_update_util - Take a note about CPU utilization changes. |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 1891 | * @rq: Runqueue to carry out the update for. |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 1892 | * @flags: Update reason flags. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 1893 | * |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 1894 | * This function is called by the scheduler on the CPU whose utilization is |
| 1895 | * being updated. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 1896 | * |
| 1897 | * It can only be called from RCU-sched read-side critical sections. |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 1898 | * |
| 1899 | * The way cpufreq is currently arranged requires it to evaluate the CPU |
| 1900 | * performance state (frequency/voltage) on a regular basis to prevent it from |
| 1901 | * being stuck in a completely inadequate performance level for too long. |
| 1902 | * That is not guaranteed to happen if the updates are only triggered from CFS, |
| 1903 | * though, because they may not be coming in if RT or deadline tasks are active |
| 1904 | * all the time (or there are RT and DL tasks only). |
| 1905 | * |
| 1906 | * As a workaround for that issue, this function is called by the RT and DL |
| 1907 | * sched classes to trigger extra cpufreq updates to prevent it from stalling, |
| 1908 | * but that really is a band-aid. Going forward it should be replaced with |
| 1909 | * solutions targeted more specifically at RT and DL tasks. |
| 1910 | */ |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 1911 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 1912 | { |
Rafael J. Wysocki | 58919e8 | 2016-08-16 22:14:55 +0200 | [diff] [blame] | 1913 | struct update_util_data *data; |
| 1914 | |
| 1915 | data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)); |
| 1916 | if (data) |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 1917 | data->func(data, rq_clock(rq), flags); |
| 1918 | } |
| 1919 | |
| 1920 | static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) |
| 1921 | { |
| 1922 | if (cpu_of(rq) == smp_processor_id()) |
| 1923 | cpufreq_update_util(rq, flags); |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 1924 | } |
| 1925 | #else |
Rafael J. Wysocki | 12bde33 | 2016-08-10 03:11:17 +0200 | [diff] [blame] | 1926 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} |
| 1927 | static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {} |
Rafael J. Wysocki | adaf9fc | 2016-03-10 20:44:47 +0100 | [diff] [blame] | 1928 | #endif /* CONFIG_CPU_FREQ */ |
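/*
 * Illustrative sketch: the RT class kicks cpufreq from update_curr_rt()
 * roughly like this (SCHED_CPUFREQ_RT comes from <linux/sched/cpufreq.h>):
 *
 *	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 *
 * which forwards rq_clock(rq) and the reason flag to the governor callback,
 * but only when the update targets the local CPU.
 */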
Linus Torvalds | be53f58 | 2016-03-24 09:42:50 -0700 | [diff] [blame] | 1929 | |
Rafael J. Wysocki | 9bdcb44 | 2016-04-02 01:09:12 +0200 | [diff] [blame] | 1930 | #ifdef arch_scale_freq_capacity |
| 1931 | #ifndef arch_scale_freq_invariant |
| 1932 | #define arch_scale_freq_invariant() (true) |
| 1933 | #endif |
| 1934 | #else /* arch_scale_freq_capacity */ |
| 1935 | #define arch_scale_freq_invariant() (false) |
| 1936 | #endif |