
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern long calc_load_fold_active(struct rq *this_rq);
extern void update_cpu_load_active(struct rq *this_rq);

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
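/*
 * Illustrative example: with HZ == 1000, NSEC_PER_SEC / HZ == 1000000,
 * so NS_TO_JIFFIES(2000000) evaluates to 2 jiffies.
 */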

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
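/*
 * Example of what the macros above expand to: with the resolution branch
 * disabled (as it is here), scale_load() is an identity and
 * NICE_0_LOAD == 1L << 10 == 1024; were SCHED_LOAD_RESOLUTION set to 10,
 * NICE_0_LOAD would become 1L << 20 and load values would carry ten extra
 * bits of fixed-point precision.
 */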

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

static inline bool dl_time_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}
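/*
 * Illustrative note on the comparison above: casting the difference to s64
 * makes dl_time_before() safe across u64 wrap-around. For instance, with
 * a == ULLONG_MAX - 1 and b == 1 the subtraction wraps to -3, so @a is
 * still (correctly) reported as earlier, whereas a plain 'a < b' would not.
 */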

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It can in turn be changed by writing on its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

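/*
 * Illustrative sketch (assumed usage; the real admission-control path in
 * the scheduler core is expected to look roughly like this) of how the
 * helpers above combine:
 *
 *	raw_spin_lock(&dl_b->lock);
 *	if (__dl_overflow(dl_b, cpus, 0, new_bw)) {
 *		raw_spin_unlock(&dl_b->lock);
 *		return -EBUSY;
 *	}
 *	__dl_add(dl_b, new_bw);
 *	raw_spin_unlock(&dl_b->lock);
 *
 * i.e. a new -deadline task is admitted only while
 * total_bw - old_bw + new_bw stays within bw * cpus.
 */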
extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef	CONFIG_SMP
	atomic_long_t load_avg;
	atomic_t runnable_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
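/*
 * Illustrative sketch (hypothetical visitor, not declared anywhere in the
 * scheduler) of how a tg_visitor pair can be used with walk_tg_tree():
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &nr);
 *	rcu_read_unlock();
 *
 * tg_nop() (declared below) is the conventional no-op visitor for the
 * direction the caller does not care about.
 */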

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
	 * This allows for the description of both thread and group usage (in
	 * the FAIR_GROUP_SCHED case).
	 * runnable_load_avg is the sum of the load_avg_contrib of the
	 * sched_entities on the rq.
	 * blocked_load_avg is similar to runnable_load_avg except that it
	 * covers the blocked sched_entities on the rq.
	 * utilization_load_avg is the sum of the average running time of the
	 * sched_entities on the rq.
	 */
	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
	atomic64_t decay_counter;
	u64 last_decay;
	atomic_long_t removed_load;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* Required to track per-cpu representation of a task_group */
	u32 tg_runnable_contrib;
	unsigned long tg_load_contrib;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;

	struct sched_avg avg;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return ACCESS_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
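/*
 * Illustrative note (assumed usage): the per-cpu sd_llc pointer declared
 * below is typically populated from the domain tree as
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *
 * i.e. the widest sched_domain whose CPUs still share a last-level cache,
 * while lowest_flag_domain() is the natural counterpart for picking the
 * smallest domain carrying a given flag.
 */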

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
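/*
 * Worked example (assuming the usual defaults of sched_rt_period_us ==
 * 1000000 and sched_rt_runtime_us == 950000): global_rt_period() is one
 * second and global_rt_runtime() is 0.95s worth of nanoseconds, i.e. RT
 * tasks may consume at most 95% of each period; writing -1 to
 * sched_rt_runtime_us yields RUNTIME_INF instead.
 */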

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

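/*
 * Illustrative example (assumed to mirror how the weight/inverse pair is
 * consumed by the fair class): dividing by a weight is approximated by a
 * multiply and shift with the precomputed inverse,
 *
 *	delta / weight  ~=  (delta * prio_to_wmult[prio]) >> 32
 *
 * e.g. for nice 0: (delta * 4194304) >> 32 == delta / 1024, since
 * 4194304 == 2^32 / 1024.
 */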
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1149 | #define ENQUEUE_WAKEUP 1 |
| 1150 | #define ENQUEUE_HEAD 2 |
| 1151 | #ifdef CONFIG_SMP |
| 1152 | #define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ |
| 1153 | #else |
| 1154 | #define ENQUEUE_WAKING 0 |
| 1155 | #endif |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1156 | #define ENQUEUE_REPLENISH 8 |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1157 | |
| 1158 | #define DEQUEUE_SLEEP 1 |
| 1159 | |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1160 | #define RETRY_TASK ((void *)-1UL) |
| 1161 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1162 | struct sched_class { |
| 1163 | const struct sched_class *next; |
| 1164 | |
| 1165 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1166 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
| 1167 | void (*yield_task) (struct rq *rq); |
| 1168 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); |
| 1169 | |
| 1170 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
| 1171 | |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1172 | /* |
| 1173 | * The pick_next_task() method, which returns the next task to run, is |
| 1174 | * responsible for calling put_prev_task() on the @prev task (or doing |
| 1175 | * something equivalent) before it returns. |
Peter Zijlstra | 37e117c | 2014-02-14 12:25:08 +0100 | [diff] [blame] | 1176 | * |
| 1177 | * May return RETRY_TASK when it finds a higher prio class has runnable |
| 1178 | * tasks. |
Peter Zijlstra | 606dba2 | 2012-02-11 06:05:00 +0100 | [diff] [blame] | 1179 | */ |
| 1180 | struct task_struct * (*pick_next_task) (struct rq *rq, |
| 1181 | struct task_struct *prev); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1182 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
| 1183 | |
| 1184 | #ifdef CONFIG_SMP |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 1185 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1186 | void (*migrate_task_rq)(struct task_struct *p, int next_cpu); |
| 1187 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1188 | void (*post_schedule) (struct rq *this_rq); |
| 1189 | void (*task_waking) (struct task_struct *task); |
| 1190 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
| 1191 | |
| 1192 | void (*set_cpus_allowed)(struct task_struct *p, |
| 1193 | const struct cpumask *newmask); |
| 1194 | |
| 1195 | void (*rq_online)(struct rq *rq); |
| 1196 | void (*rq_offline)(struct rq *rq); |
| 1197 | #endif |
| 1198 | |
| 1199 | void (*set_curr_task) (struct rq *rq); |
| 1200 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
| 1201 | void (*task_fork) (struct task_struct *p); |
Dario Faggioli | e6c390f | 2013-11-07 14:43:35 +0100 | [diff] [blame] | 1202 | void (*task_dead) (struct task_struct *p); |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1203 | |
Kirill Tkhai | 67dfa1b | 2014-10-27 17:40:52 +0300 | [diff] [blame] | 1204 | /* |
| 1205 | * The switched_from() call is allowed to drop rq->lock, therefore we |
| 1206 | * cannot assume the switched_from/switched_to pair is serialized by |
| 1207 | * rq->lock. They are however serialized by p->pi_lock. |
| 1208 | */ |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1209 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); |
| 1210 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
| 1211 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
| 1212 | int oldprio); |
| 1213 | |
| 1214 | unsigned int (*get_rr_interval) (struct rq *rq, |
| 1215 | struct task_struct *task); |
| 1216 | |
Stanislaw Gruszka | 6e99891 | 2014-11-12 16:58:44 +0100 | [diff] [blame] | 1217 | void (*update_curr) (struct rq *rq); |
| 1218 | |
Li Zefan | c82ba9f | 2013-03-05 16:06:55 +0800 | [diff] [blame] | 1219 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 1220 | void (*task_move_group) (struct task_struct *p, int on_rq); |
| 1221 | #endif |
| 1222 | }; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1223 | |
Peter Zijlstra | 3f1d2a3 | 2014-02-12 10:49:30 +0100 | [diff] [blame] | 1224 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
| 1225 | { |
| 1226 | prev->sched_class->put_prev_task(rq, prev); |
| 1227 | } |
| 1228 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1229 | #define sched_class_highest (&stop_sched_class) |
| 1230 | #define for_each_class(class) \ |
| 1231 | for (class = sched_class_highest; class; class = class->next) |
| 1232 | |
| 1233 | extern const struct sched_class stop_sched_class; |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1234 | extern const struct sched_class dl_sched_class; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1235 | extern const struct sched_class rt_sched_class; |
| 1236 | extern const struct sched_class fair_sched_class; |
| 1237 | extern const struct sched_class idle_sched_class; |
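| | |
| | /* |
| | * Minimal sketch (simplified from the pick_next_task() loop in core.c, |
| | * which restarts the walk on RETRY_TASK instead of giving up): the classes |
| | * are walked from sched_class_highest downwards until one of them returns |
| | * a runnable task; the idle class always has one. |
| | */ |
| | static inline struct task_struct * |
| | pick_next_task_example(struct rq *rq, struct task_struct *prev) |
| | { |
| |         const struct sched_class *class; |
| |         struct task_struct *p; |
| | |
| |         for_each_class(class) { |
| |                 p = class->pick_next_task(rq, prev); |
| |                 if (p == RETRY_TASK) |
| |                         return NULL; /* real code restarts the walk */ |
| |                 if (p) |
| |                         return p; |
| |         } |
| |         BUG(); /* the idle class should always have a task */ |
| | } |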
| 1238 | |
| 1239 | |
| 1240 | #ifdef CONFIG_SMP |
| 1241 | |
Nicolas Pitre | 63b2ca3 | 2014-05-26 18:19:37 -0400 | [diff] [blame] | 1242 | extern void update_group_capacity(struct sched_domain *sd, int cpu); |
Li Zefan | b719203 | 2013-03-07 10:00:26 +0800 | [diff] [blame] | 1243 | |
Daniel Lezcano | 7caff66 | 2014-01-06 12:34:38 +0100 | [diff] [blame] | 1244 | extern void trigger_load_balance(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1245 | |
Vincent Guittot | 642dbc3 | 2013-04-18 18:34:26 +0200 | [diff] [blame] | 1246 | extern void idle_enter_fair(struct rq *this_rq); |
| 1247 | extern void idle_exit_fair(struct rq *this_rq); |
Vincent Guittot | 642dbc3 | 2013-04-18 18:34:26 +0200 | [diff] [blame] | 1248 | |
Peter Zijlstra | dc87734 | 2014-02-12 15:47:29 +0100 | [diff] [blame] | 1249 | #else |
| 1250 | |
| 1251 | static inline void idle_enter_fair(struct rq *rq) { } |
| 1252 | static inline void idle_exit_fair(struct rq *rq) { } |
| 1253 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1254 | #endif |
| 1255 | |
Daniel Lezcano | 442bf3a | 2014-09-04 11:32:09 -0400 | [diff] [blame] | 1256 | #ifdef CONFIG_CPU_IDLE |
| 1257 | static inline void idle_set_state(struct rq *rq, |
| 1258 | struct cpuidle_state *idle_state) |
| 1259 | { |
| 1260 | rq->idle_state = idle_state; |
| 1261 | } |
| 1262 | |
| 1263 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1264 | { |
| 1265 | WARN_ON(!rcu_read_lock_held()); |
| 1266 | return rq->idle_state; |
| 1267 | } |
| 1268 | #else |
| 1269 | static inline void idle_set_state(struct rq *rq, |
| 1270 | struct cpuidle_state *idle_state) |
| 1271 | { |
| 1272 | } |
| 1273 | |
| 1274 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| 1275 | { |
| 1276 | return NULL; |
| 1277 | } |
| 1278 | #endif |
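| | |
| | /* |
| | * Usage sketch (hypothetical helper, loosely following the idle loop in |
| | * idle.c): the idle task publishes the cpuidle state it is about to enter |
| | * so that RCU-protected readers can inspect it via idle_get_state(), and |
| | * clears it again on exit. |
| | */ |
| | static inline void idle_state_usage_example(struct rq *rq, |
| |                                             struct cpuidle_state *target) |
| | { |
| |         idle_set_state(rq, target); |
| |         /* ... enter the idle state here ... */ |
| |         idle_set_state(rq, NULL); |
| | } |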
| 1279 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1280 | extern void sysrq_sched_debug_show(void); |
| 1281 | extern void sched_init_granularity(void); |
| 1282 | extern void update_max_interval(void); |
Juri Lelli | 1baca4c | 2013-11-07 14:43:38 +0100 | [diff] [blame] | 1283 | |
| 1284 | extern void init_sched_dl_class(void); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1285 | extern void init_sched_rt_class(void); |
| 1286 | extern void init_sched_fair_class(void); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1288 | |
Kirill Tkhai | 8875125 | 2014-06-29 00:03:57 +0400 | [diff] [blame] | 1289 | extern void resched_curr(struct rq *rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1290 | extern void resched_cpu(int cpu); |
| 1291 | |
| 1292 | extern struct rt_bandwidth def_rt_bandwidth; |
| 1293 | extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); |
| 1294 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1295 | extern struct dl_bandwidth def_dl_bandwidth; |
| 1296 | extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1297 | extern void init_dl_task_timer(struct sched_dl_entity *dl_se); |
| 1298 | |
Dario Faggioli | 332ac17 | 2013-11-07 14:43:45 +0100 | [diff] [blame] | 1299 | unsigned long to_ratio(u64 period, u64 runtime); |
| 1300 | |
Peter Zijlstra | 556061b | 2012-05-11 17:31:26 +0200 | [diff] [blame] | 1301 | extern void update_idle_cpu_load(struct rq *this_rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1302 | |
Alex Shi | a75cdaa | 2013-06-20 10:18:47 +0800 | [diff] [blame] | 1303 | extern void init_task_runnable_average(struct task_struct *p); |
| 1304 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1305 | static inline void add_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1306 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1307 | unsigned prev_nr = rq->nr_running; |
| 1308 | |
| 1309 | rq->nr_running = prev_nr + count; |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1310 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1311 | if (prev_nr < 2 && rq->nr_running >= 2) { |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1312 | #ifdef CONFIG_SMP |
| 1313 | if (!rq->rd->overload) |
| 1314 | rq->rd->overload = true; |
| 1315 | #endif |
| 1316 | |
| 1317 | #ifdef CONFIG_NO_HZ_FULL |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1318 | if (tick_nohz_full_cpu(rq->cpu)) { |
Frederic Weisbecker | 3882ec6 | 2014-03-18 22:54:04 +0100 | [diff] [blame] | 1319 | /* |
| 1320 | * Tick is needed if more than one task runs on a CPU. |
| 1321 | * Send the target an IPI to kick it out of nohz mode. |
| 1322 | * |
| 1323 | * We assume that the IPI implies a full memory barrier and |
| 1324 | * that the new value of rq->nr_running is visible to the |
| 1325 | * target when it receives the IPI. |
| 1326 | */ |
Frederic Weisbecker | fd2ac4f | 2014-03-18 21:12:53 +0100 | [diff] [blame] | 1327 | tick_nohz_full_kick_cpu(rq->cpu); |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1328 | } |
Frederic Weisbecker | 9f3660c | 2013-04-20 14:35:09 +0200 | [diff] [blame] | 1329 | #endif |
Tim Chen | 4486edd | 2014-06-23 12:16:49 -0700 | [diff] [blame] | 1330 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1331 | } |
| 1332 | |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1333 | static inline void sub_nr_running(struct rq *rq, unsigned count) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1334 | { |
Kirill Tkhai | 7246544 | 2014-05-09 03:00:14 +0400 | [diff] [blame] | 1335 | rq->nr_running -= count; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1336 | } |
| 1337 | |
Frederic Weisbecker | 265f22a | 2013-05-03 03:39:05 +0200 | [diff] [blame] | 1338 | static inline void rq_last_tick_reset(struct rq *rq) |
| 1339 | { |
| 1340 | #ifdef CONFIG_NO_HZ_FULL |
| 1341 | rq->last_sched_tick = jiffies; |
| 1342 | #endif |
| 1343 | } |
| 1344 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1345 | extern void update_rq_clock(struct rq *rq); |
| 1346 | |
| 1347 | extern void activate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1348 | extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); |
| 1349 | |
| 1350 | extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); |
| 1351 | |
| 1352 | extern const_debug unsigned int sysctl_sched_time_avg; |
| 1353 | extern const_debug unsigned int sysctl_sched_nr_migrate; |
| 1354 | extern const_debug unsigned int sysctl_sched_migration_cost; |
| 1355 | |
| 1356 | static inline u64 sched_avg_period(void) |
| 1357 | { |
| 1358 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1359 | } |
| 1360 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1361 | #ifdef CONFIG_SCHED_HRTICK |
| 1362 | |
| 1363 | /* |
| 1364 | * Use hrtick when: |
| 1365 | * - enabled by features |
| 1366 | * - hrtimer is actually high res |
| 1367 | */ |
| 1368 | static inline int hrtick_enabled(struct rq *rq) |
| 1369 | { |
| 1370 | if (!sched_feat(HRTICK)) |
| 1371 | return 0; |
| 1372 | if (!cpu_active(cpu_of(rq))) |
| 1373 | return 0; |
| 1374 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1375 | } |
| 1376 | |
| 1377 | void hrtick_start(struct rq *rq, u64 delay); |
| 1378 | |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 1379 | #else |
| 1380 | |
| 1381 | static inline int hrtick_enabled(struct rq *rq) |
| 1382 | { |
| 1383 | return 0; |
| 1384 | } |
| 1385 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1386 | #endif /* CONFIG_SCHED_HRTICK */ |
| 1387 | |
| 1388 | #ifdef CONFIG_SMP |
| 1389 | extern void sched_avg_update(struct rq *rq); |
Peter Zijlstra | dfbca41 | 2015-03-23 14:19:05 +0100 | [diff] [blame] | 1390 | |
| 1391 | #ifndef arch_scale_freq_capacity |
| 1392 | static __always_inline |
| 1393 | unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu) |
| 1394 | { |
| 1395 | return SCHED_CAPACITY_SCALE; |
| 1396 | } |
| 1397 | #endif |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1398 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1399 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1400 | { |
Vincent Guittot | b5b4860 | 2015-02-27 16:54:08 +0100 | [diff] [blame] | 1401 | rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1402 | sched_avg_update(rq); |
| 1403 | } |
| 1404 | #else |
| 1405 | static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } |
| 1406 | static inline void sched_avg_update(struct rq *rq) { } |
| 1407 | #endif |
| 1408 | |
| 1409 | extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period); |
| 1410 | |
Peter Zijlstra | 3960c8c | 2015-02-17 13:22:25 +0100 | [diff] [blame] | 1411 | /* |
| 1412 | * __task_rq_lock - lock the rq @p resides on. |
| 1413 | */ |
| 1414 | static inline struct rq *__task_rq_lock(struct task_struct *p) |
| 1415 | __acquires(rq->lock) |
| 1416 | { |
| 1417 | struct rq *rq; |
| 1418 | |
| 1419 | lockdep_assert_held(&p->pi_lock); |
| 1420 | |
| 1421 | for (;;) { |
| 1422 | rq = task_rq(p); |
| 1423 | raw_spin_lock(&rq->lock); |
| 1424 | if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) |
| 1425 | return rq; |
| 1426 | raw_spin_unlock(&rq->lock); |
| 1427 | |
| 1428 | while (unlikely(task_on_rq_migrating(p))) |
| 1429 | cpu_relax(); |
| 1430 | } |
| 1431 | } |
| 1432 | |
| 1433 | /* |
| 1434 | * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. |
| 1435 | */ |
| 1436 | static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) |
| 1437 | __acquires(p->pi_lock) |
| 1438 | __acquires(rq->lock) |
| 1439 | { |
| 1440 | struct rq *rq; |
| 1441 | |
| 1442 | for (;;) { |
| 1443 | raw_spin_lock_irqsave(&p->pi_lock, *flags); |
| 1444 | rq = task_rq(p); |
| 1445 | raw_spin_lock(&rq->lock); |
| 1446 | /* |
| 1447 | * move_queued_task() task_rq_lock() |
| 1448 | * |
| 1449 | * ACQUIRE (rq->lock) |
| 1450 | * [S] ->on_rq = MIGRATING [L] rq = task_rq() |
| 1451 | * WMB (__set_task_cpu()) ACQUIRE (rq->lock); |
| 1452 | * [S] ->cpu = new_cpu [L] task_rq() |
| 1453 | * [L] ->on_rq |
| 1454 | * RELEASE (rq->lock) |
| 1455 | * |
| 1456 | * If we observe the old cpu in task_rq_lock, the acquire of |
| 1457 | * the old rq->lock will fully serialize against the stores. |
| 1458 | * |
| 1459 | * If we observe the new cpu in task_rq_lock, the acquire will |
| 1460 | * pair with the WMB to ensure we must then also see migrating. |
| 1461 | */ |
| 1462 | if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) |
| 1463 | return rq; |
| 1464 | raw_spin_unlock(&rq->lock); |
| 1465 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); |
| 1466 | |
| 1467 | while (unlikely(task_on_rq_migrating(p))) |
| 1468 | cpu_relax(); |
| 1469 | } |
| 1470 | } |
| 1471 | |
| 1472 | static inline void __task_rq_unlock(struct rq *rq) |
| 1473 | __releases(rq->lock) |
| 1474 | { |
| 1475 | raw_spin_unlock(&rq->lock); |
| 1476 | } |
| 1477 | |
| 1478 | static inline void |
| 1479 | task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) |
| 1480 | __releases(rq->lock) |
| 1481 | __releases(p->pi_lock) |
| 1482 | { |
| 1483 | raw_spin_unlock(&rq->lock); |
| 1484 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); |
| 1485 | } |
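| | |
| | /* |
| | * Usage sketch (hypothetical helper): task_rq_lock() pairs p->pi_lock with |
| | * the lock of whatever rq the task is currently on, so the caller can |
| | * operate on @p without it migrating or changing rq underneath; |
| | * task_rq_unlock() releases both in the right order. |
| | */ |
| | static inline void task_rq_lock_usage_example(struct task_struct *p) |
| | { |
| |         unsigned long flags; |
| |         struct rq *rq; |
| | |
| |         rq = task_rq_lock(p, &flags); |
| |         /* @p is stable here: it cannot change rq until we unlock */ |
| |         task_rq_unlock(rq, p, &flags); |
| | } |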
| 1486 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1487 | #ifdef CONFIG_SMP |
| 1488 | #ifdef CONFIG_PREEMPT |
| 1489 | |
| 1490 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 1491 | |
| 1492 | /* |
| 1493 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 1494 | * way at the expense of forcing extra atomic operations in all |
| 1495 | * invocations. This ensures that the double_lock is acquired using the |
| 1496 | * same underlying policy as the spinlock_t on this architecture, which |
| 1497 | * reduces latency compared to the unfair variant below. However, it |
| 1498 | * also adds more overhead and therefore may reduce throughput. |
| 1499 | */ |
| 1500 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1501 | __releases(this_rq->lock) |
| 1502 | __acquires(busiest->lock) |
| 1503 | __acquires(this_rq->lock) |
| 1504 | { |
| 1505 | raw_spin_unlock(&this_rq->lock); |
| 1506 | double_rq_lock(this_rq, busiest); |
| 1507 | |
| 1508 | return 1; |
| 1509 | } |
| 1510 | |
| 1511 | #else |
| 1512 | /* |
| 1513 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 1514 | * latency by eliminating extra atomic operations when the locks are |
| 1515 | * already in proper order on entry. This favors lower cpu-ids and will |
| 1516 | * grant the double lock to lower cpus over higher ids under contention, |
| 1517 | * regardless of entry order into the function. |
| 1518 | */ |
| 1519 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1520 | __releases(this_rq->lock) |
| 1521 | __acquires(busiest->lock) |
| 1522 | __acquires(this_rq->lock) |
| 1523 | { |
| 1524 | int ret = 0; |
| 1525 | |
| 1526 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
| 1527 | if (busiest < this_rq) { |
| 1528 | raw_spin_unlock(&this_rq->lock); |
| 1529 | raw_spin_lock(&busiest->lock); |
| 1530 | raw_spin_lock_nested(&this_rq->lock, |
| 1531 | SINGLE_DEPTH_NESTING); |
| 1532 | ret = 1; |
| 1533 | } else |
| 1534 | raw_spin_lock_nested(&busiest->lock, |
| 1535 | SINGLE_DEPTH_NESTING); |
| 1536 | } |
| 1537 | return ret; |
| 1538 | } |
| 1539 | |
| 1540 | #endif /* CONFIG_PREEMPT */ |
| 1541 | |
| 1542 | /* |
| 1543 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 1544 | */ |
| 1545 | static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1546 | { |
| 1547 | if (unlikely(!irqs_disabled())) { |
| 1548 | /* printk() doesn't work well under rq->lock */ |
| 1549 | raw_spin_unlock(&this_rq->lock); |
| 1550 | BUG_ON(1); |
| 1551 | } |
| 1552 | |
| 1553 | return _double_lock_balance(this_rq, busiest); |
| 1554 | } |
| 1555 | |
| 1556 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 1557 | __releases(busiest->lock) |
| 1558 | { |
| 1559 | raw_spin_unlock(&busiest->lock); |
| 1560 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 1561 | } |
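| | |
| | /* |
| | * Usage sketch (hypothetical helper, simplified from the push/pull paths |
| | * in rt.c and deadline.c): double_lock_balance() may drop this_rq->lock to |
| | * respect the lock ordering, so any state derived from this_rq must be |
| | * re-validated when it returns 1. |
| | */ |
| | static inline void double_lock_balance_usage_example(struct rq *this_rq, |
| |                                                      struct rq *busiest) |
| | { |
| |         lockdep_assert_held(&this_rq->lock); |
| | |
| |         if (double_lock_balance(this_rq, busiest)) { |
| |                 /* this_rq->lock was dropped and re-taken: re-check state */ |
| |         } |
| |         /* ... move tasks between the two runqueues here ... */ |
| |         double_unlock_balance(this_rq, busiest); |
| | } |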
| 1562 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1563 | static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
| 1564 | { |
| 1565 | if (l1 > l2) |
| 1566 | swap(l1, l2); |
| 1567 | |
| 1568 | spin_lock(l1); |
| 1569 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1570 | } |
| 1571 | |
Mike Galbraith | 60e69ee | 2014-04-07 10:55:15 +0200 | [diff] [blame] | 1572 | static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) |
| 1573 | { |
| 1574 | if (l1 > l2) |
| 1575 | swap(l1, l2); |
| 1576 | |
| 1577 | spin_lock_irq(l1); |
| 1578 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1579 | } |
| 1580 | |
Peter Zijlstra | 7460231 | 2013-10-10 20:17:22 +0200 | [diff] [blame] | 1581 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
| 1582 | { |
| 1583 | if (l1 > l2) |
| 1584 | swap(l1, l2); |
| 1585 | |
| 1586 | raw_spin_lock(l1); |
| 1587 | raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| 1588 | } |
| 1589 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1590 | /* |
| 1591 | * double_rq_lock - safely lock two runqueues |
| 1592 | * |
| 1593 | * Note this does not disable interrupts like task_rq_lock(); |
| 1594 | * you need to do so manually before calling. |
| 1595 | */ |
| 1596 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1597 | __acquires(rq1->lock) |
| 1598 | __acquires(rq2->lock) |
| 1599 | { |
| 1600 | BUG_ON(!irqs_disabled()); |
| 1601 | if (rq1 == rq2) { |
| 1602 | raw_spin_lock(&rq1->lock); |
| 1603 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1604 | } else { |
| 1605 | if (rq1 < rq2) { |
| 1606 | raw_spin_lock(&rq1->lock); |
| 1607 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 1608 | } else { |
| 1609 | raw_spin_lock(&rq2->lock); |
| 1610 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 1611 | } |
| 1612 | } |
| 1613 | } |
| 1614 | |
| 1615 | /* |
| 1616 | * double_rq_unlock - safely unlock two runqueues |
| 1617 | * |
| 1618 | * Note this does not restore interrupts like task_rq_unlock(); |
| 1619 | * you need to do so manually after calling. |
| 1620 | */ |
| 1621 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1622 | __releases(rq1->lock) |
| 1623 | __releases(rq2->lock) |
| 1624 | { |
| 1625 | raw_spin_unlock(&rq1->lock); |
| 1626 | if (rq1 != rq2) |
| 1627 | raw_spin_unlock(&rq2->lock); |
| 1628 | else |
| 1629 | __release(rq2->lock); |
| 1630 | } |
| 1631 | |
| 1632 | #else /* CONFIG_SMP */ |
| 1633 | |
| 1634 | /* |
| 1635 | * double_rq_lock - safely lock two runqueues |
| 1636 | * |
| 1637 | * Note this does not disable interrupts like task_rq_lock(); |
| 1638 | * you need to do so manually before calling. |
| 1639 | */ |
| 1640 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1641 | __acquires(rq1->lock) |
| 1642 | __acquires(rq2->lock) |
| 1643 | { |
| 1644 | BUG_ON(!irqs_disabled()); |
| 1645 | BUG_ON(rq1 != rq2); |
| 1646 | raw_spin_lock(&rq1->lock); |
| 1647 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1648 | } |
| 1649 | |
| 1650 | /* |
| 1651 | * double_rq_unlock - safely unlock two runqueues |
| 1652 | * |
| 1653 | * Note this does not restore interrupts like task_rq_unlock(); |
| 1654 | * you need to do so manually after calling. |
| 1655 | */ |
| 1656 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1657 | __releases(rq1->lock) |
| 1658 | __releases(rq2->lock) |
| 1659 | { |
| 1660 | BUG_ON(rq1 != rq2); |
| 1661 | raw_spin_unlock(&rq1->lock); |
| 1662 | __release(rq2->lock); |
| 1663 | } |
| 1664 | |
| 1665 | #endif |
| 1666 | |
| 1667 | extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); |
| 1668 | extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); |
| 1669 | extern void print_cfs_stats(struct seq_file *m, int cpu); |
| 1670 | extern void print_rt_stats(struct seq_file *m, int cpu); |
Wanpeng Li | acb3213 | 2014-10-31 06:39:33 +0800 | [diff] [blame] | 1671 | extern void print_dl_stats(struct seq_file *m, int cpu); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1672 | |
| 1673 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
Abel Vesa | 07c54f7 | 2015-03-03 13:50:27 +0200 | [diff] [blame] | 1674 | extern void init_rt_rq(struct rt_rq *rt_rq); |
| 1675 | extern void init_dl_rq(struct dl_rq *dl_rq); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1676 | |
Ben Segall | 1ee14e6 | 2013-10-16 11:16:12 -0700 | [diff] [blame] | 1677 | extern void cfs_bandwidth_usage_inc(void); |
| 1678 | extern void cfs_bandwidth_usage_dec(void); |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 1679 | |
Frederic Weisbecker | 3451d02 | 2011-08-10 23:21:01 +0200 | [diff] [blame] | 1680 | #ifdef CONFIG_NO_HZ_COMMON |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 1681 | enum rq_nohz_flag_bits { |
| 1682 | NOHZ_TICK_STOPPED, |
| 1683 | NOHZ_BALANCE_KICK, |
| 1684 | }; |
| 1685 | |
| 1686 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
| 1687 | #endif |
Frederic Weisbecker | 73fbec6 | 2012-06-16 15:57:37 +0200 | [diff] [blame] | 1688 | |
| 1689 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| 1690 | |
| 1691 | DECLARE_PER_CPU(u64, cpu_hardirq_time); |
| 1692 | DECLARE_PER_CPU(u64, cpu_softirq_time); |
| 1693 | |
| 1694 | #ifndef CONFIG_64BIT |
| 1695 | DECLARE_PER_CPU(seqcount_t, irq_time_seq); |
| 1696 | |
| 1697 | static inline void irq_time_write_begin(void) |
| 1698 | { |
| 1699 | __this_cpu_inc(irq_time_seq.sequence); |
| 1700 | smp_wmb(); |
| 1701 | } |
| 1702 | |
| 1703 | static inline void irq_time_write_end(void) |
| 1704 | { |
| 1705 | smp_wmb(); |
| 1706 | __this_cpu_inc(irq_time_seq.sequence); |
| 1707 | } |
| 1708 | |
| 1709 | static inline u64 irq_time_read(int cpu) |
| 1710 | { |
| 1711 | u64 irq_time; |
| 1712 | unsigned seq; |
| 1713 | |
| 1714 | do { |
| 1715 | seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); |
| 1716 | irq_time = per_cpu(cpu_softirq_time, cpu) + |
| 1717 | per_cpu(cpu_hardirq_time, cpu); |
| 1718 | } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); |
| 1719 | |
| 1720 | return irq_time; |
| 1721 | } |
| 1722 | #else /* CONFIG_64BIT */ |
| 1723 | static inline void irq_time_write_begin(void) |
| 1724 | { |
| 1725 | } |
| 1726 | |
| 1727 | static inline void irq_time_write_end(void) |
| 1728 | { |
| 1729 | } |
| 1730 | |
| 1731 | static inline u64 irq_time_read(int cpu) |
| 1732 | { |
| 1733 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); |
| 1734 | } |
| 1735 | #endif /* CONFIG_64BIT */ |
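| | |
| | /* |
| | * Writer-side sketch (loosely modelled on irqtime_account_irq() in |
| | * cputime.c, which derives the hard/soft split from the preempt count): |
| | * updates to the per-cpu irq time counters are bracketed by |
| | * irq_time_write_begin()/irq_time_write_end() so that 32-bit readers of |
| | * irq_time_read() see a consistent value. |
| | */ |
| | static inline void irq_time_account_example(u64 delta, bool hardirq) |
| | { |
| |         irq_time_write_begin(); |
| |         if (hardirq) |
| |                 __this_cpu_add(cpu_hardirq_time, delta); |
| |         else |
| |                 __this_cpu_add(cpu_softirq_time, delta); |
| |         irq_time_write_end(); |
| | } |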
| 1736 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |