#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq);

#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
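/*
 * Worked example (assuming HZ == 1000): a jiffy is NSEC_PER_SEC / HZ ==
 * 1000000ns long, so NS_TO_JIFFIES(2000000) == 2. Note that the integer
 * division truncates: anything shorter than one jiffy converts to 0.
 */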
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
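/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * round-tripping a weight through the scaling macros. With
 * SCHED_LOAD_RESOLUTION == 0 (the current setting above) both macros are
 * no-ops and NICE_0_LOAD is 1 << 10 == 1024; with the 64-bit variant
 * enabled, scale_load() would shift weights up by 10 bits to give load
 * arithmetic extra precision.
 */
static inline unsigned long scale_load_example(unsigned long user_weight)
{
	/* expand to internal fixed-point units, then back to user units */
	return scale_load_down(scale_load(user_weight));	/* == user_weight */
}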
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}
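/*
 * Illustrative sketch (hypothetical helper, not a kernel function): the
 * predicates above compose naturally when validating parameters coming in
 * from sched_setscheduler() and friends:
 */
static inline int check_policy_example(int policy)
{
	if (!valid_policy(policy))
		return -EINVAL;		/* unknown scheduling class */
	return 0;
}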
static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
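/*
 * Illustrative sketch (mirrors what the RT class does internally; not a
 * helper exported by this header): one bit per priority level means the
 * highest queued priority is a find-first-bit over the bitmap, and the
 * task to run is the head of that priority's list.
 */
static inline int rt_prio_array_first_idx_example(struct rt_prio_array *array)
{
	/* returns MAX_RT_PRIO (the delimiter bit) when no tasks are queued */
	return sched_find_first_bit(array->bitmap);
}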
struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
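/*
 * Illustrative sketch (hypothetical helper; the real admission path lives
 * in kernel/sched/core.c, and div64_u64() from <linux/math64.h> is assumed
 * to be available): a new -deadline task's bandwidth is its runtime/period
 * ratio in fixed point, and admission succeeds only if adding it keeps the
 * summed bandwidth under the per-CPU cap checked by __dl_overflow() above.
 * Caller holds dl_b->lock.
 */
static inline bool dl_admit_example(struct dl_bw *dl_b, int cpus,
				    u64 runtime_ns, u64 period_ns)
{
	u64 new_bw = div64_u64(runtime_ns << 20, period_ns);	/* fixed point */

	if (__dl_overflow(dl_b, cpus, 0, new_bw))
		return false;		/* would exceed cpus * dl_b->bw */
	__dl_add(dl_b, new_bw);		/* reserve the bandwidth */
	return true;
}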
extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	atomic_long_t load_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must order its lock
 * acquisitions by ascending runqueue address.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
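/*
 * Illustrative sketch (hypothetical names, mirroring how the RT and DL
 * classes use this): a scheduling class keeps a per-CPU callback_head and
 * queues it while holding rq->lock; @func then runs once rq->lock can be
 * released, when it is safe to take other runqueues' locks.
 */
static DEFINE_PER_CPU(struct callback_head, example_balance_head);

static inline void queue_example_balance_work(struct rq *rq,
					      void (*func)(struct rq *rq))
{
	/* no-op if this CPU's callback is already pending */
	queue_balance_callback(rq, &per_cpu(example_balance_head, cpu_of(rq)),
			       func);
}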
extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
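/*
 * Illustrative usage (this is how the sd_llc pointer below is derived in
 * kernel/sched/core.c): find the widest domain whose CPUs still share a
 * last-level cache, under the protection for_each_domain() requires:
 *
 *	struct sched_domain *sd;
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 */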
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
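/*
 * Illustrative usage: sched_feat() reads as a plain boolean expression at
 * the call site, e.g. as the fair class does when placing sleeping tasks:
 *
 *	if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *		thresh >>= 1;
 *
 * With SCHED_DEBUG and jump labels it compiles down to a static branch
 * patched at runtime; otherwise it is a bit test on sysctl_sched_features.
 */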
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1018 | |
Srikar Dronamraju | 2a59572 | 2015-08-11 21:54:21 +0530 | [diff] [blame] | 1019 | extern struct static_key_false sched_numa_balancing; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1020 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1021 | static inline u64 global_rt_period(void) |
| 1022 | { |
| 1023 | return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
| 1024 | } |
| 1025 | |
| 1026 | static inline u64 global_rt_runtime(void) |
| 1027 | { |
| 1028 | if (sysctl_sched_rt_runtime < 0) |
| 1029 | return RUNTIME_INF; |
| 1030 | |
| 1031 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
| 1032 | } |
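/*
 * Worked example: with the default sysctls (sched_rt_period_us == 1000000,
 * sched_rt_runtime_us == 950000) these return 1s and 0.95s in nanoseconds,
 * i.e. RT tasks may consume at most 95% of every period. Writing -1 to
 * sched_rt_runtime_us yields RUNTIME_INF, which disables RT throttling.
 */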
| 1033 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1034 | static inline int task_current(struct rq *rq, struct task_struct *p) |
| 1035 | { |
| 1036 | return rq->curr == p; |
| 1037 | } |
| 1038 | |
| 1039 | static inline int task_running(struct rq *rq, struct task_struct *p) |
| 1040 | { |
| 1041 | #ifdef CONFIG_SMP |
| 1042 | return p->on_cpu; |
| 1043 | #else |
| 1044 | return task_current(rq, p); |
| 1045 | #endif |
| 1046 | } |
| 1047 | |
Kirill Tkhai | da0c1e6 | 2014-08-20 13:47:32 +0400 | [diff] [blame] | 1048 | static inline int task_on_rq_queued(struct task_struct *p) |
| 1049 | { |
| 1050 | return p->on_rq == TASK_ON_RQ_QUEUED; |
| 1051 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1052 | |
Kirill Tkhai | cca26e8 | 2014-08-20 13:47:42 +0400 | [diff] [blame] | 1053 | static inline int task_on_rq_migrating(struct task_struct *p) |
| 1054 | { |
| 1055 | return p->on_rq == TASK_ON_RQ_MIGRATING; |
| 1056 | } |
| 1057 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1058 | #ifndef prepare_arch_switch |
| 1059 | # define prepare_arch_switch(next) do { } while (0) |
| 1060 | #endif |
Catalin Marinas | 01f23e1 | 2011-11-27 21:43:10 +0000 | [diff] [blame] | 1061 | #ifndef finish_arch_post_lock_switch |
| 1062 | # define finish_arch_post_lock_switch() do { } while (0) |
| 1063 | #endif |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1064 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1065 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
| 1066 | { |
| 1067 | #ifdef CONFIG_SMP |
| 1068 | /* |
| 1069 | * We can optimise this out completely for !SMP, because the |
| 1070 | * SMP rebalancing from interrupt is the only thing that cares |
| 1071 | * here. |
| 1072 | */ |
| 1073 | next->on_cpu = 1; |
| 1074 | #endif |
| 1075 | } |
| 1076 | |
| 1077 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
| 1078 | { |
| 1079 | #ifdef CONFIG_SMP |
| 1080 | /* |
| 1081 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
| 1082 | * We must ensure this doesn't happen until the switch is completely |
| 1083 | * finished. |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1084 | * |
Peter Zijlstra | b75a225 | 2015-10-06 14:36:17 +0200 | [diff] [blame] | 1085 | * In particular, the load of prev->state in finish_task_switch() must |
| 1086 | * happen before this. |
| 1087 | * |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1088 | * Pairs with the control dependency and rmb in try_to_wake_up(). |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1089 | */ |
Peter Zijlstra | 95913d9 | 2015-09-29 14:45:09 +0200 | [diff] [blame] | 1090 | smp_store_release(&prev->on_cpu, 0); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1091 | #endif |
| 1092 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 1093 | /* this is a valid case when another task releases the spinlock */ |
| 1094 | rq->lock.owner = current; |
| 1095 | #endif |
| 1096 | /* |
| 1097 | * If we are tracking spinlock dependencies then we have to |
| 1098 | * fix up the runqueue lock - which gets 'carried over' from |
| 1099 | * prev into current: |
| 1100 | */ |
| 1101 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
| 1102 | |
| 1103 | raw_spin_unlock_irq(&rq->lock); |
| 1104 | } |
| 1105 | |
Li Zefan | b13095f | 2013-03-05 16:06:38 +0800 | [diff] [blame] | 1106 | /* |
| 1107 | * wake flags |
| 1108 | */ |
| 1109 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ |
| 1110 | #define WF_FORK 0x02 /* child wakeup after fork */ |
| 1111 | #define WF_MIGRATED 0x4 /* internal use, task got migrated */ |
| 1112 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1113 | /* |
| 1114 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
| 1115 | * of tasks with abnormal "nice" values across CPUs the contribution that |
| 1116 | * each task makes to its run queue's load is weighted according to its |
| 1117 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
| 1118 | * scaled version of the new time slice allocation that they receive on time |
| 1119 | * slice expiry etc. |
| 1120 | */ |
| 1121 | |
| 1122 | #define WEIGHT_IDLEPRIO 3 |
| 1123 | #define WMULT_IDLEPRIO 1431655765 |
| 1124 | |
Andi Kleen | ed82b8a | 2015-11-29 20:59:43 -0800 | [diff] [blame^] | 1125 | extern const int sched_prio_to_weight[40]; |
| 1126 | extern const u32 sched_prio_to_wmult[40]; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1127 | |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1128 | #define ENQUEUE_WAKEUP 0x01 |
| 1129 | #define ENQUEUE_HEAD 0x02 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1130 | #ifdef CONFIG_SMP |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1131 | #define ENQUEUE_WAKING 0x04 /* sched_class::task_waking was called */ |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1132 | #else |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1133 | #define ENQUEUE_WAKING 0x00 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1134 | #endif |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1135 | #define ENQUEUE_REPLENISH 0x08 |
| 1136 | #define ENQUEUE_RESTORE 0x10 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1137 | |
Peter Zijlstra | 1de6444 | 2015-09-30 17:44:13 +0200 | [diff] [blame] | 1138 | #define DEQUEUE_SLEEP 0x01 |
| 1139 | #define DEQUEUE_SAVE 0x02 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1140 | |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1141 | #define RETRY_TASK ((void *)-1UL) |
| 1142 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1143 | struct sched_class { |
| 1144 | const struct sched_class *next; |
| 1145 | |
| 1146 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1147 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1148 | void (*yield_task) (struct rq *rq); |
| 1149 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); |
| 1150 | |
| 1151 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
| 1152 | |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1153 | /* |
| 1154 | * The pick_next_task() method must call put_prev_task() (or do
| 1155 | * something equivalent) on the @prev task before returning the
| 1156 | * next task.
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1157 | *
| 1158 | * May return RETRY_TASK when it finds that a higher-priority class
| 1159 | * has runnable tasks.
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1160 | */ |
| 1161 | struct task_struct * (*pick_next_task) (struct rq *rq, |
| 1162 | struct task_struct *prev); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1163 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
| 1164 | |
| 1165 | #ifdef CONFIG_SMP |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1166 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
xiaofeng.yan | 5a4fd03 | 2015-09-23 14:55:59 +0800 | [diff] [blame] | 1167 | void (*migrate_task_rq)(struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1168 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1169 | void (*task_waking) (struct task_struct *task); |
| 1170 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
| 1171 | |
| 1172 | void (*set_cpus_allowed)(struct task_struct *p, |
| 1173 | const struct cpumask *newmask); |
| 1174 | |
| 1175 | void (*rq_online)(struct rq *rq); |
| 1176 | void (*rq_offline)(struct rq *rq); |
| 1177 | #endif |
| 1178 | |
| 1179 | void (*set_curr_task) (struct rq *rq); |
| 1180 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
| 1181 | void (*task_fork) (struct task_struct *p); |
Dario Faggioli | e6c390f | 2013-11-07 14:43:35 +0100 | [diff] [blame] | 1182 | void (*task_dead) (struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1183 | |
Kirill Tkhai | 67dfa1b | 2014-10-27 17:40:52 +0300 | [diff] [blame] | 1184 | /* |
| 1185 | * The switched_from() call is allowed to drop rq->lock, therefore we |
| 1186 | * cannot assume the switched_from/switched_to pair is serialized by
| 1187 | * rq->lock. They are however serialized by p->pi_lock. |
| 1188 | */ |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1189 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); |
| 1190 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
| 1191 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
| 1192 | int oldprio); |
| 1193 | |
| 1194 | unsigned int (*get_rr_interval) (struct rq *rq, |
| 1195 | struct task_struct *task); |
| 1196 | |
Stanislaw Gruszka | 6e99891 | 2014-11-12 16:58:44 +0100 | [diff] [blame] | 1197 | void (*update_curr) (struct rq *rq); |
| 1198 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1199 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | bc54da2 | 2015-08-31 17:13:55 +0200 | [diff] [blame] | 1200 | void (*task_move_group) (struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1201 | #endif |
| 1202 | }; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1203 | |
Peter Zijlstra | 3f1d2a3 | 2014-02-12 10:49:30 +0100 | [diff] [blame] | 1204 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
| 1205 | { |
| 1206 | prev->sched_class->put_prev_task(rq, prev); |
| 1207 | } |
| 1208 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1209 | #define sched_class_highest (&stop_sched_class) |
| 1210 | #define for_each_class(class) \ |
| 1211 | for (class = sched_class_highest; class; class = class->next) |
| 1212 | |
| 1213 | extern const struct sched_class stop_sched_class; |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1214 | extern const struct sched_class dl_sched_class; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1215 | extern const struct sched_class rt_sched_class; |
| 1216 | extern const struct sched_class fair_sched_class; |
| 1217 | extern const struct sched_class idle_sched_class; |
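| | 
| | /*
| |  * Illustrative sketch of how the core scheduler walks the class list,
| |  * from stop (highest) down to idle (lowest). Hypothetical helper; the
| |  * real pick loop also restarts when a class returns RETRY_TASK, which
| |  * is omitted here.
| |  */
| | static inline struct task_struct *
| | pick_next_task_sketch(struct rq *rq, struct task_struct *prev)
| | {
| | 	const struct sched_class *class;
| | 	struct task_struct *p;
| | 
| | 	for_each_class(class) {
| | 		p = class->pick_next_task(rq, prev);
| | 		if (p)
| | 			return p;
| | 	}
| | 
| | 	return NULL;	/* unreachable: the idle class always has a task */
| | }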
| 1218 | |
| 1219 | |
| 1220 | #ifdef CONFIG_SMP |
| 1221 | |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1222 | extern void update_group_capacity(struct sched_domain *sd, int cpu); |
Li Zefan | b719203 | 2013-03-07 10:00:26 +0800 | [diff] [blame] | 1223 | |
Daniel Lezcano | 7caff66 | 2014-01-06 12:34:38 +0100 | [diff] [blame] | 1224 | extern void trigger_load_balance(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1225 | |
Peter Zijlstra | c5b2803 | 2015-05-15 17:43:35 +0200 | [diff] [blame] | 1226 | extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); |
| 1227 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1228 | #endif |
| 1229 | |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1230 | #ifdef CONFIG_CPU_IDLE |
| 1231 | static inline void idle_set_state(struct rq *rq, |
| 1232 | struct cpuidle_state *idle_state) |
| 1233 | { |
| 1234 | rq->idle_state = idle_state; |
| 1235 | } |
| 1236 | |
| 1237 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1238 | { |
| 1239 | WARN_ON(!rcu_read_lock_held()); |
| 1240 | return rq->idle_state; |
| 1241 | } |
| 1242 | #else |
| 1243 | static inline void idle_set_state(struct rq *rq, |
| 1244 | struct cpuidle_state *idle_state) |
| 1245 | { |
| 1246 | } |
| 1247 | |
| 1248 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1249 | { |
| 1250 | return NULL; |
| 1251 | } |
| 1252 | #endif |
| 1253 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1254 | extern void sysrq_sched_debug_show(void); |
| 1255 | extern void sched_init_granularity(void); |
| 1256 | extern void update_max_interval(void); |
Juri Lelli | 1baca4c | 2013-11-07 14:43:38 +0100 | [diff] [blame] | 1257 | |
| 1258 | extern void init_sched_dl_class(void); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1259 | extern void init_sched_rt_class(void); |
| 1260 | extern void init_sched_fair_class(void); |
| 1261 | |
Kirill Tkhai | 8875125 | 2014-06-29 00:03:57 +0400 | [diff] [blame] | 1262 | extern void resched_curr(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1263 | extern void resched_cpu(int cpu); |
| 1264 | |
| 1265 | extern struct rt_bandwidth def_rt_bandwidth; |
| 1266 | extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); |
| 1267 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1268 | extern struct dl_bandwidth def_dl_bandwidth; |
| 1269 | extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1270 | extern void init_dl_task_timer(struct sched_dl_entity *dl_se); |
| 1271 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1272 | extern unsigned long to_ratio(u64 period, u64 runtime);
| 1273 | |
Yuyang Du | 540247f | 2015-07-15 08:04:39 +0800 | [diff] [blame] | 1274 | extern void init_entity_runnable_average(struct sched_entity *se); |
Alex Shi | a75cdaa | 2013-06-20 10:18:47 +0800 | [diff] [blame] | 1275 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1276 | static inline void add_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1277 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1278 | unsigned prev_nr = rq->nr_running; |
| 1279 | |
| 1280 | rq->nr_running = prev_nr + count; |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1281 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1282 | if (prev_nr < 2 && rq->nr_running >= 2) { |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1283 | #ifdef CONFIG_SMP |
| 1284 | if (!rq->rd->overload) |
| 1285 | rq->rd->overload = true; |
| 1286 | #endif |
| 1287 | |
| 1288 | #ifdef CONFIG_NO_HZ_FULL |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1289 | if (tick_nohz_full_cpu(rq->cpu)) { |
Frederic Weisbecker | 3882ec6 | 2014-03-18 22:54:04 +0100 | [diff] [blame] | 1290 | /* |
| 1291 | * Tick is needed if more than one task runs on a CPU. |
| 1292 | * Send the target an IPI to kick it out of nohz mode. |
| 1293 | * |
| 1294 | * We assume the IPI implies a full memory barrier and that the
| 1295 | * new value of rq->nr_running will be visible to the target CPU
| 1296 | * when it receives the IPI.
| 1297 | */ |
Frederic Weisbecker | fd2ac4f | 2014-03-18 21:12:53 +0100 | [diff] [blame] | 1298 | tick_nohz_full_kick_cpu(rq->cpu); |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1299 | } |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1300 | #endif |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1301 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1302 | } |
| 1303 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1304 | static inline void sub_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1305 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1306 | rq->nr_running -= count; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1307 | } |
| 1308 | |
Frederic Weisbecker | 265f22a | 2013-05-03 03:39:05 +0200 | [diff] [blame] | 1309 | static inline void rq_last_tick_reset(struct rq *rq) |
| 1310 | { |
| 1311 | #ifdef CONFIG_NO_HZ_FULL |
| 1312 | rq->last_sched_tick = jiffies; |
| 1313 | #endif |
| 1314 | } |
| 1315 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1316 | extern void update_rq_clock(struct rq *rq); |
| 1317 | |
| 1318 | extern void activate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1319 | extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1320 | |
| 1321 | extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); |
| 1322 | |
| 1323 | extern const_debug unsigned int sysctl_sched_time_avg; |
| 1324 | extern const_debug unsigned int sysctl_sched_nr_migrate; |
| 1325 | extern const_debug unsigned int sysctl_sched_migration_cost; |
| 1326 | |
| 1327 | static inline u64 sched_avg_period(void) |
| 1328 | { |
| 1329 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1330 | } |
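| | 
| | /*
| |  * Worked example: with the default sysctl_sched_time_avg of 1000 (ms),
| |  * this yields a 500 ms half-period, returned in nanoseconds.
| |  */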
| 1331 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1332 | #ifdef CONFIG_SCHED_HRTICK |
| 1333 | |
| 1334 | /* |
| 1335 | * Use hrtick when: |
| 1336 | * - enabled via the HRTICK scheduler feature
| 1337 | * - the CPU is active
| | * - the hrtimer is actually high resolution
| 1338 | */ |
| 1339 | static inline int hrtick_enabled(struct rq *rq) |
| 1340 | { |
| 1341 | if (!sched_feat(HRTICK)) |
| 1342 | return 0; |
| 1343 | if (!cpu_active(cpu_of(rq))) |
| 1344 | return 0; |
| 1345 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1346 | } |
| 1347 | |
| 1348 | void hrtick_start(struct rq *rq, u64 delay); |
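| | 
| | /*
| |  * Illustrative call pattern (hypothetical helper): a scheduling class
| |  * arming the high-resolution preemption timer for the remainder of a
| |  * task's slice, guarded by the checks above.
| |  */
| | static inline void hrtick_start_sketch(struct rq *rq, u64 slice_ns)
| | {
| | 	if (hrtick_enabled(rq))
| | 		hrtick_start(rq, slice_ns);
| | }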
| 1349 | |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 1350 | #else |
| 1351 | |
| 1352 | static inline int hrtick_enabled(struct rq *rq) |
| 1353 | { |
| 1354 | return 0; |
| 1355 | } |
| 1356 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1357 | #endif /* CONFIG_SCHED_HRTICK */ |
| 1358 | |
| 1359 | #ifdef CONFIG_SMP |
| 1360 | extern void sched_avg_update(struct rq *rq); |
Peter Zijlstra | dfbca41 | 2015-03-23 14:19:05 +0100 | [diff] [blame] | 1361 | |
| 1362 | #ifndef arch_scale_freq_capacity |
| 1363 | static __always_inline |
| 1364 | unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu) |
| 1365 | { |
| 1366 | return SCHED_CAPACITY_SCALE; |
| 1367 | } |
| 1368 | #endif |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1369 | |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1370 | #ifndef arch_scale_cpu_capacity |
| 1371 | static __always_inline |
| 1372 | unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) |
| 1373 | { |
Dietmar Eggemann | e3279a2 | 2015-08-15 00:04:41 +0100 | [diff] [blame] | 1374 | if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1)) |
Morten Rasmussen | 8cd5601 | 2015-08-14 17:23:10 +0100 | [diff] [blame] | 1375 | return sd->smt_gain / sd->span_weight; |
| 1376 | |
| 1377 | return SCHED_CAPACITY_SCALE; |
| 1378 | } |
| 1379 | #endif |
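| | 
| | /*
| |  * Worked example for the default above, assuming the usual smt_gain of
| |  * 1178 (~15% above SCHED_CAPACITY_SCALE): in a two-thread SMT domain
| |  * (span_weight == 2) each sibling reports 1178 / 2 = 589, i.e. a bit
| |  * more than half of a full CPU's 1024.
| |  */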
| 1380 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1381 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1382 | { |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1383 | rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1384 | sched_avg_update(rq); |
| 1385 | } |
| 1386 | #else |
| 1387 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } |
| 1388 | static inline void sched_avg_update(struct rq *rq) { } |
| 1389 | #endif |
| 1390 | |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1391 | /* |
| 1392 | * __task_rq_lock - lock the rq @p resides on. |
| 1393 | */ |
| 1394 | static inline struct rq *__task_rq_lock(struct task_struct *p) |
| 1395 | __acquires(rq->lock) |
| 1396 | { |
| 1397 | struct rq *rq; |
| 1398 | |
| 1399 | lockdep_assert_held(&p->pi_lock); |
| 1400 | |
| 1401 | for (;;) { |
| 1402 | rq = task_rq(p); |
| 1403 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | cbce1a6 | 2015-06-11 14:46:54 +0200 | [diff] [blame] | 1404 | if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { |
| 1405 | lockdep_pin_lock(&rq->lock); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1406 | return rq; |
Peter Zijlstra | cbce1a6 | 2015-06-11 14:46:54 +0200 | [diff] [blame] | 1407 | } |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1408 | raw_spin_unlock(&rq->lock); |
| 1409 | |
| 1410 | while (unlikely(task_on_rq_migrating(p))) |
| 1411 | cpu_relax(); |
| 1412 | } |
| 1413 | } |
| 1414 | |
| 1415 | /* |
| 1416 | * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. |
| 1417 | */ |
| 1418 | static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) |
| 1419 | __acquires(p->pi_lock) |
| 1420 | __acquires(rq->lock) |
| 1421 | { |
| 1422 | struct rq *rq; |
| 1423 | |
| 1424 | for (;;) { |
| 1425 | raw_spin_lock_irqsave(&p->pi_lock, *flags); |
| 1426 | rq = task_rq(p); |
| 1427 | raw_spin_lock(&rq->lock); |
| 1428 | /* |
| 1429 | * move_queued_task() task_rq_lock() |
| 1430 | * |
| 1431 | * ACQUIRE (rq->lock) |
| 1432 | * [S] ->on_rq = MIGRATING [L] rq = task_rq() |
| 1433 | * WMB (__set_task_cpu()) ACQUIRE (rq->lock); |
| 1434 | * [S] ->cpu = new_cpu [L] task_rq() |
| 1435 | * [L] ->on_rq |
| 1436 | * RELEASE (rq->lock) |
| 1437 | * |
| 1438 | * If we observe the old cpu in task_rq_lock, the acquire of |
| 1439 | * the old rq->lock will fully serialize against the stores. |
| 1440 | * |
| 1441 | * If we observe the new cpu in task_rq_lock, the acquire will |
| 1442 | * pair with the WMB to ensure we must then also see migrating. |
| 1443 | */ |
Peter Zijlstra | cbce1a6 | 2015-06-11 14:46:54 +0200 | [diff] [blame] | 1444 | if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { |
| 1445 | lockdep_pin_lock(&rq->lock); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1446 | return rq; |
Peter Zijlstra | cbce1a6 | 2015-06-11 14:46:54 +0200 | [diff] [blame] | 1447 | } |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1448 | raw_spin_unlock(&rq->lock); |
| 1449 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); |
| 1450 | |
| 1451 | while (unlikely(task_on_rq_migrating(p))) |
| 1452 | cpu_relax(); |
| 1453 | } |
| 1454 | } |
| 1455 | |
| 1456 | static inline void __task_rq_unlock(struct rq *rq) |
| 1457 | __releases(rq->lock) |
| 1458 | { |
Peter Zijlstra | cbce1a6 | 2015-06-11 14:46:54 +0200 | [diff] [blame] | 1459 | lockdep_unpin_lock(&rq->lock); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1460 | raw_spin_unlock(&rq->lock); |
| 1461 | } |
| 1462 | |
| 1463 | static inline void |
| 1464 | task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) |
| 1465 | __releases(rq->lock) |
| 1466 | __releases(p->pi_lock) |
| 1467 | { |
Peter Zijlstra | cbce1a6 | 2015-06-11 14:46:54 +0200 | [diff] [blame] | 1468 | lockdep_unpin_lock(&rq->lock); |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1469 | raw_spin_unlock(&rq->lock); |
| 1470 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); |
| 1471 | } |
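| | 
| | /*
| |  * Typical usage of the lock/unlock pair above (hypothetical helper):
| |  * holding both p->pi_lock and the task's rq->lock gives a stable view
| |  * of the task's scheduling state.
| |  */
| | static inline int task_policy_sketch(struct task_struct *p)
| | {
| | 	unsigned long flags;
| | 	struct rq *rq;
| | 	int policy;
| | 
| | 	rq = task_rq_lock(p, &flags);
| | 	policy = p->policy;
| | 	task_rq_unlock(rq, p, &flags);
| | 
| | 	return policy;
| | }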
| 1472 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1473 | #ifdef CONFIG_SMP |
| 1474 | #ifdef CONFIG_PREEMPT |
| 1475 | |
| 1476 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 1477 | |
| 1478 | /* |
| 1479 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 1480 | * way at the expense of forcing extra atomic operations in all |
| 1481 | * invocations. This ensures that the double_lock is acquired using the
| 1482 | * same underlying policy as the spinlock_t on this architecture, which |
| 1483 | * reduces latency compared to the unfair variant below. However, it |
| 1484 | * also adds more overhead and therefore may reduce throughput. |
| 1485 | */ |
| 1486 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1487 | __releases(this_rq->lock) |
| 1488 | __acquires(busiest->lock) |
| 1489 | __acquires(this_rq->lock) |
| 1490 | { |
| 1491 | raw_spin_unlock(&this_rq->lock); |
| 1492 | double_rq_lock(this_rq, busiest); |
| 1493 | |
| 1494 | return 1; |
| 1495 | } |
| 1496 | |
| 1497 | #else |
| 1498 | /* |
| 1499 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 1500 | * latency by eliminating extra atomic operations when the locks are |
| 1501 | * already in proper order on entry. Under contention this favors
| 1502 | * lower cpu-ids: the double lock is granted to the lower-id cpu
| 1503 | * regardless of the order in which the locks were requested.
| 1504 | */ |
| 1505 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1506 | __releases(this_rq->lock) |
| 1507 | __acquires(busiest->lock) |
| 1508 | __acquires(this_rq->lock) |
| 1509 | { |
| 1510 | int ret = 0; |
| 1511 | |
| 1512 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
| 1513 | if (busiest < this_rq) { |
| 1514 | raw_spin_unlock(&this_rq->lock); |
| 1515 | raw_spin_lock(&busiest->lock); |
| 1516 | raw_spin_lock_nested(&this_rq->lock, |
| 1517 | SINGLE_DEPTH_NESTING); |
| 1518 | ret = 1; |
| 1519 | } else |
| 1520 | raw_spin_lock_nested(&busiest->lock, |
| 1521 | SINGLE_DEPTH_NESTING); |
| 1522 | } |
| 1523 | return ret; |
| 1524 | } |
| 1525 | |
| 1526 | #endif /* CONFIG_PREEMPT */ |
| 1527 | |
| 1528 | /* |
| 1529 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 1530 | */ |
| 1531 | static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1532 | { |
| 1533 | if (unlikely(!irqs_disabled())) { |
| 1534 | /* printk() doesn't work well while holding rq->lock */
| 1535 | raw_spin_unlock(&this_rq->lock); |
| 1536 | BUG_ON(1); |
| 1537 | } |
| 1538 | |
| 1539 | return _double_lock_balance(this_rq, busiest); |
| 1540 | } |
| 1541 | |
| 1542 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 1543 | __releases(busiest->lock) |
| 1544 | { |
| 1545 | raw_spin_unlock(&busiest->lock); |
| 1546 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 1547 | } |
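| | 
| | /*
| |  * Illustrative call pattern (sketch): a non-zero return from
| |  * double_lock_balance() means this_rq->lock was dropped and retaken,
| |  * so any rq state read beforehand must be revalidated.
| |  */
| | static inline void lock_busiest_sketch(struct rq *this_rq, struct rq *busiest)
| | {
| | 	if (double_lock_balance(this_rq, busiest)) {
| | 		/* this_rq->lock was released; re-check this_rq state here */
| | 	}
| | 	/* ... pull tasks from busiest ... */
| | 	double_unlock_balance(this_rq, busiest);
| | }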
| 1548 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1549 | static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
| 1550 | { |
| 1551 | if (l1 > l2) |
| 1552 | swap(l1, l2); |
| 1553 | |
| 1554 | spin_lock(l1); |
| 1555 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1556 | } |
| 1557 | |
Mike Galbraith | 60e69ee | 2014-04-07 10:55:15 +0200 | [diff] [blame] | 1558 | static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) |
| 1559 | { |
| 1560 | if (l1 > l2) |
| 1561 | swap(l1, l2); |
| 1562 | |
| 1563 | spin_lock_irq(l1); |
| 1564 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1565 | } |
| 1566 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1567 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
| 1568 | { |
| 1569 | if (l1 > l2) |
| 1570 | swap(l1, l2); |
| 1571 | |
| 1572 | raw_spin_lock(l1); |
| 1573 | raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1574 | } |
| 1575 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1576 | /* |
| 1577 | * double_rq_lock - safely lock two runqueues |
| 1578 | * |
| 1579 | * Note this does not disable interrupts like task_rq_lock, |
| 1580 | * you need to do so manually before calling. |
| 1581 | */ |
| 1582 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1583 | __acquires(rq1->lock) |
| 1584 | __acquires(rq2->lock) |
| 1585 | { |
| 1586 | BUG_ON(!irqs_disabled()); |
| 1587 | if (rq1 == rq2) { |
| 1588 | raw_spin_lock(&rq1->lock); |
| 1589 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1590 | } else { |
| 1591 | if (rq1 < rq2) { |
| 1592 | raw_spin_lock(&rq1->lock); |
| 1593 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 1594 | } else { |
| 1595 | raw_spin_lock(&rq2->lock); |
| 1596 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 1597 | } |
| 1598 | } |
| 1599 | } |
| 1600 | |
| 1601 | /* |
| 1602 | * double_rq_unlock - safely unlock two runqueues |
| 1603 | * |
| 1604 | * Note this does not restore interrupts like task_rq_unlock, |
| 1605 | * you need to do so manually after calling. |
| 1606 | */ |
| 1607 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1608 | __releases(rq1->lock) |
| 1609 | __releases(rq2->lock) |
| 1610 | { |
| 1611 | raw_spin_unlock(&rq1->lock); |
| 1612 | if (rq1 != rq2) |
| 1613 | raw_spin_unlock(&rq2->lock); |
| 1614 | else |
| 1615 | __release(rq2->lock); |
| 1616 | } |
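| | 
| | /*
| |  * Simplified migration sketch under both locks (hypothetical helper;
| |  * the real move_queued_task() path also manages ->on_rq and re-checks
| |  * affinity). Interrupts must already be disabled by the caller, per
| |  * the double_rq_lock() comment above.
| |  */
| | static inline void move_task_sketch(struct rq *src_rq, struct rq *dst_rq,
| | 				    struct task_struct *p)
| | {
| | 	double_rq_lock(src_rq, dst_rq);
| | 	deactivate_task(src_rq, p, 0);
| | 	set_task_cpu(p, cpu_of(dst_rq));
| | 	activate_task(dst_rq, p, 0);
| | 	double_rq_unlock(src_rq, dst_rq);
| | }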
| 1617 | |
| 1618 | #else /* CONFIG_SMP */ |
| 1619 | |
| 1620 | /* |
| 1621 | * double_rq_lock - safely lock two runqueues |
| 1622 | * |
| 1623 | * Note this does not disable interrupts like task_rq_lock, |
| 1624 | * you need to do so manually before calling. |
| 1625 | */ |
| 1626 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1627 | __acquires(rq1->lock) |
| 1628 | __acquires(rq2->lock) |
| 1629 | { |
| 1630 | BUG_ON(!irqs_disabled()); |
| 1631 | BUG_ON(rq1 != rq2); |
| 1632 | raw_spin_lock(&rq1->lock); |
| 1633 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1634 | } |
| 1635 | |
| 1636 | /* |
| 1637 | * double_rq_unlock - safely unlock two runqueues |
| 1638 | * |
| 1639 | * Note this does not restore interrupts like task_rq_unlock, |
| 1640 | * you need to do so manually after calling. |
| 1641 | */ |
| 1642 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1643 | __releases(rq1->lock) |
| 1644 | __releases(rq2->lock) |
| 1645 | { |
| 1646 | BUG_ON(rq1 != rq2); |
| 1647 | raw_spin_unlock(&rq1->lock); |
| 1648 | __release(rq2->lock); |
| 1649 | } |
| 1650 | |
| 1651 | #endif |
| 1652 | |
| 1653 | extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); |
| 1654 | extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 1655 | |
| 1656 | #ifdef CONFIG_SCHED_DEBUG |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1657 | extern void print_cfs_stats(struct seq_file *m, int cpu); |
| 1658 | extern void print_rt_stats(struct seq_file *m, int cpu); |
Wanpeng Li | acb3213 | 2014-10-31 06:39:33 +0800 | [diff] [blame] | 1659 | extern void print_dl_stats(struct seq_file *m, int cpu); |
Srikar Dronamraju | 6b55c96 | 2015-06-25 22:51:41 +0530 | [diff] [blame] | 1660 | extern void |
| 1661 | print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); |
Srikar Dronamraju | 397f237 | 2015-06-25 22:51:43 +0530 | [diff] [blame] | 1662 | |
| 1663 | #ifdef CONFIG_NUMA_BALANCING |
| 1664 | extern void |
| 1665 | show_numa_stats(struct task_struct *p, struct seq_file *m); |
| 1666 | extern void |
| 1667 | print_numa_stats(struct seq_file *m, int node, unsigned long tsf, |
| 1668 | unsigned long tpf, unsigned long gsf, unsigned long gpf); |
| 1669 | #endif /* CONFIG_NUMA_BALANCING */ |
| 1670 | #endif /* CONFIG_SCHED_DEBUG */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1671 | |
| 1672 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
Abel Vesa | 07c54f7 | 2015-03-03 13:50:27 +0200 | [diff] [blame] | 1673 | extern void init_rt_rq(struct rt_rq *rt_rq); |
| 1674 | extern void init_dl_rq(struct dl_rq *dl_rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1675 | |
Ben Segall | 1ee14e6 | 2013-10-16 11:16:12 -0700 | [diff] [blame] | 1676 | extern void cfs_bandwidth_usage_inc(void); |
| 1677 | extern void cfs_bandwidth_usage_dec(void); |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 1678 | |
Frederic Weisbecker | 3451d02 | 2011-08-10 23:21:01 +0200 | [diff] [blame] | 1679 | #ifdef CONFIG_NO_HZ_COMMON |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 1680 | enum rq_nohz_flag_bits { |
| 1681 | NOHZ_TICK_STOPPED, |
| 1682 | NOHZ_BALANCE_KICK, |
| 1683 | }; |
| 1684 | |
| 1685 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
| 1686 | #endif |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1687 | |
| 1688 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| 1689 | |
| 1690 | DECLARE_PER_CPU(u64, cpu_hardirq_time); |
| 1691 | DECLARE_PER_CPU(u64, cpu_softirq_time); |
| 1692 | |
| 1693 | #ifndef CONFIG_64BIT |
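| | /*
| |  * On 32-bit, the two u64 counters above cannot be loaded atomically;
| |  * the seqcount below lets irq_time_read() retry when it races with a
| |  * writer updating them.
| |  */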
| 1694 | DECLARE_PER_CPU(seqcount_t, irq_time_seq); |
| 1695 | |
| 1696 | static inline void irq_time_write_begin(void) |
| 1697 | { |
| 1698 | __this_cpu_inc(irq_time_seq.sequence); |
| 1699 | smp_wmb(); |
| 1700 | } |
| 1701 | |
| 1702 | static inline void irq_time_write_end(void) |
| 1703 | { |
| 1704 | smp_wmb(); |
| 1705 | __this_cpu_inc(irq_time_seq.sequence); |
| 1706 | } |
| 1707 | |
| 1708 | static inline u64 irq_time_read(int cpu) |
| 1709 | { |
| 1710 | u64 irq_time; |
| 1711 | unsigned seq; |
| 1712 | |
| 1713 | do { |
| 1714 | seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); |
| 1715 | irq_time = per_cpu(cpu_softirq_time, cpu) + |
| 1716 | per_cpu(cpu_hardirq_time, cpu); |
| 1717 | } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); |
| 1718 | |
| 1719 | return irq_time; |
| 1720 | } |
| 1721 | #else /* CONFIG_64BIT */ |
| 1722 | static inline void irq_time_write_begin(void) |
| 1723 | { |
| 1724 | } |
| 1725 | |
| 1726 | static inline void irq_time_write_end(void) |
| 1727 | { |
| 1728 | } |
| 1729 | |
| 1730 | static inline u64 irq_time_read(int cpu) |
| 1731 | { |
| 1732 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); |
| 1733 | } |
| 1734 | #endif /* CONFIG_64BIT */ |
| 1735 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |