/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline
void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
}

static inline
void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
        if (dl_rq->running_bw > old)
                dl_rq->running_bw = 0;
}

void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
        if (task_on_rq_queued(p))
                return;

        if (!p->dl.dl_non_contending)
                return;

        sub_running_bw(p->dl.dl_bw, &task_rq(p)->dl);
        p->dl.dl_non_contending = 0;
        /*
         * If the timer handler is currently running and the
         * timer cannot be cancelled, inactive_task_timer()
         * will see that dl_non_contending is not set, and
         * will not touch the rq's active utilization,
         * so we are still safe.
         */
        if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                put_task_struct(p);
}

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |      ACTIVE      |
 *          +------------------>+    contending    |
 *          |  add_running_bw   |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |              t < 0-lag |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          | sub_running_bw    |      ACTIVE      |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *               fired          +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
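
/*
 * A purely illustrative example of the 0-lag computation done by
 * task_non_contending() below (the numbers are hypothetical, not taken
 * from the code): a task with dl_runtime = 10 ms and dl_period = 100 ms
 * blocks with 4 ms of runtime left and 30 ms to its absolute deadline.
 * Its 0-lag time is deadline - runtime * dl_period / dl_runtime =
 * deadline - 40 ms, i.e. 10 ms in the past, so running_bw is decreased
 * right away. Had only 2 ms of runtime been left, the 0-lag time would
 * be deadline - 20 ms = 10 ms in the future, and the inactive timer
 * would be armed for that instant instead.
 */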
static void task_non_contending(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->inactive_timer;
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        s64 zerolag_time;

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        WARN_ON(hrtimer_active(&dl_se->inactive_timer));
        WARN_ON(dl_se->dl_non_contending);

        zerolag_time = dl_se->deadline -
                 div64_long((dl_se->runtime * dl_se->dl_period),
                        dl_se->dl_runtime);

        /*
         * Using relative times instead of the absolute "0-lag time"
         * allows us to simplify the code
         */
        zerolag_time -= rq_clock(rq);

        /*
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
        if (zerolag_time < 0) {
                if (dl_task(p))
                        sub_running_bw(dl_se->dl_bw, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
                        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                        raw_spin_lock(&dl_b->lock);
                        __dl_clear(dl_b, p->dl.dl_bw);
                        __dl_clear_params(p);
                        raw_spin_unlock(&dl_b->lock);
                }

                return;
        }

        dl_se->dl_non_contending = 1;
        get_task_struct(p);
        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
}

static void task_contending(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (dl_se->dl_non_contending) {
                dl_se->dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
                        put_task_struct(dl_task_of(dl_se));
        } else {
                /*
                 * Since "dl_non_contending" is not set, the
                 * task's utilization has already been removed from
                 * the active utilization (either when the task blocked
                 * or when the "inactive timer" fired).
                 * So, add it back.
                 */
                add_running_bw(dl_se->dl_bw, dl_rq);
        }
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
        struct sched_dl_entity *dl_se = &p->dl;

        return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
        raw_spin_lock_init(&dl_b->dl_runtime_lock);
        dl_b->dl_period = period;
        dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
        dl_b->total_bw = 0;
}
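
/*
 * A rough, hypothetical illustration of the value computed by
 * init_dl_bw() above (the numbers are not from this file): with the
 * default rt bandwidth of runtime = 950000 us over period = 1000000 us,
 * dl_b->bw encodes a utilization cap of 0.95 in fixed point, assuming
 * to_ratio() returns runtime / period scaled by 2^BW_SHIFT
 * (0.95 * 2^20 ~= 996147). When the rt runtime is RUNTIME_INF the cap
 * is set to -1, i.e. deadline bandwidth admission is not limited.
 */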

void init_dl_rq(struct dl_rq *dl_rq)
{
        dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->dl_nr_migratory = 0;
        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif

        dl_rq->running_bw = 0;
        init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                if (!dl_rq->overloaded) {
                        dl_set_overload(rq_of_dl_rq(dl_rq));
                        dl_rq->overloaded = 1;
                }
        } else if (dl_rq->overloaded) {
                dl_clear_overload(rq_of_dl_rq(dl_rq));
                dl_rq->overloaded = 0;
        }
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;

        update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;

        update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
        struct rb_node *parent = NULL;
        struct task_struct *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct task_struct,
                                 pushable_dl_tasks);
                if (dl_entity_preempt(&p->dl, &entry->dl))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost) {
                dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
                dl_rq->earliest_dl.next = p->dl.deadline;
        }

        rb_link_node(&p->pushable_dl_tasks, parent, link);
        rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
                struct rb_node *next_node;

                next_node = rb_next(&p->pushable_dl_tasks);
                dl_rq->pushable_dl_tasks_leftmost = next_node;
                if (next_node) {
                        dl_rq->earliest_dl.next = rb_entry(next_node,
                                struct task_struct, pushable_dl_tasks)->dl.deadline;
                }
        }

        rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
        RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_dl_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
        struct rq *later_rq = NULL;

        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
                int cpu;

                /*
                 * If we cannot preempt any rq, fall back to pick any
                 * online cpu.
                 */
                cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Failed to find any suitable cpu.
                         * The task will never come back!
                         */
                        BUG_ON(dl_bandwidth_enabled());

                        /*
                         * If admission control is disabled we
                         * try a little harder to let the task
                         * run.
                         */
                        cpu = cpumask_any(cpu_active_mask);
                }
                later_rq = cpu_rq(cpu);
                double_lock_balance(rq, later_rq);
        }

        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);

        return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}

static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(dl_se->dl_boosted);
        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

        /*
         * We are racing with the deadline timer. So, do nothing because
         * the deadline timer handler will take care of properly recharging
         * the runtime and postponing the deadline
         */
        if (dl_se->dl_throttled)
                return;

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
        dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        BUG_ON(pi_se->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         */
        if (dl_se->dl_deadline == 0) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded && dl_se->runtime > 0)
                dl_se->runtime = 0;

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_se->dl_period;
                dl_se->runtime += pi_se->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                printk_deferred_once("sched: DL replenish lagged too much\n");
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded)
                dl_se->dl_yielded = 0;
        if (dl_se->dl_throttled)
                dl_se->dl_throttled = 0;
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                               struct sched_dl_entity *pi_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_se->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}
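
/*
 * A numerical sketch of the dl_entity_overflow() check above (values
 * are purely illustrative): for a task with dl_runtime = 10 ms and
 * dl_deadline = 100 ms (bandwidth 0.1) that wakes up with 5 ms of
 * runtime left and 40 ms to its absolute deadline, the residual
 * bandwidth is 5 / 40 = 0.125 > 0.1, so the function returns true and
 * the caller assigns a fresh deadline and runtime. With 60 ms left to
 * the deadline the ratio would be 5 / 60 ~= 0.083 <= 0.1 and the
 * current parameters could be kept.
 */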

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
                             struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->dl_timer;
        struct rq *rq = task_rq(p);
        ktime_t now, act;
        s64 delta;

        lockdep_assert_held(&rq->lock);

        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
        act = ns_to_ktime(dl_next_period(dl_se));
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        /*
         * !enqueued will guarantee another callback; even if one is already in
         * progress. This ensures a balanced {get,put}_task_struct().
         *
         * The race against __run_timer() clearing the enqueued state is
         * harmless because we're holding task_rq()->lock, therefore the timer
         * expiring after we've done the check will wait on its task_rq_lock()
         * and observe our state.
         */
        if (!hrtimer_is_queued(timer)) {
                get_task_struct(p);
                hrtimer_start(timer, act, HRTIMER_MODE_ABS);
        }

        return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
         * different than SCHED_DEADLINE (through switched_from_dl()).
         */
        if (!dl_task(p))
                goto unlock;

        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path; in that case it's not throttled.
         */
        if (dl_se->dl_boosted)
                goto unlock;

        /*
         * Spurious timer due to start_dl_timer() race; or we already received
         * a replenishment from rt_mutex_setprio().
         */
        if (!dl_se->dl_throttled)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);

        /*
         * If the throttle happened during sched-out; like:
         *
         *   schedule()
         *     deactivate_task()
         *       dequeue_task_dl()
         *         update_curr_dl()
         *           start_dl_timer()
         *         __dequeue_task_dl()
         *     prev->on_rq = 0;
         *
         * We can be both throttled and !queued. Replenish the counter
         * but do not enqueue -- wait for our wakeup to do that.
         */
        if (!task_on_rq_queued(p)) {
                replenish_dl_entity(dl_se, dl_se);
                goto unlock;
        }

#ifdef CONFIG_SMP
        if (unlikely(!rq->online)) {
                /*
                 * If the runqueue is no longer available, migrate the
                 * task elsewhere. This necessarily changes rq.
                 */
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(&rq->lock);
                update_rq_clock(rq);

                /*
                 * Now that the task has been migrated to the new RQ and we
                 * have that locked, proceed as normal and enqueue the task
                 * there.
                 */
        }
#endif

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
        else
                resched_curr(rq);

#ifdef CONFIG_SMP
        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
        if (has_pushable_dl_tasks(rq)) {
                /*
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock.
                 */
                rq_unpin_lock(rq, &rf);
                push_dl_task(rq);
                rq_repin_lock(rq, &rf);
        }
#endif

unlock:
        task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
         * anything related to that after this.
         */
        put_task_struct(p);

        return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
                        return;
                dl_se->dl_throttled = 1;
        }
}
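
/*
 * For instance (purely illustrative numbers): a constrained task with
 * dl_runtime = 5 ms, dl_deadline = 50 ms and dl_period = 100 ms whose
 * absolute deadline was at t = 150 ms wakes up at t = 170 ms. Since
 * 170 ms lies after the deadline (150 ms) but before the next period
 * boundary (150 - 50 + 100 = 200 ms), dl_check_constrained_dl() above
 * throttles it and start_dl_timer() defers the replenishment to
 * t = 200 ms, instead of letting it run 5 ms within the 30 ms left in
 * the current period.
 */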

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
        return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as "dq = -Uact dt", where
 * Uact is the (per-runqueue) active utilization.
 * Since rq->dl.running_bw contains Uact * 2^BW_SHIFT, the result
 * has to be shifted right by BW_SHIFT.
 * To reclaim only a fraction Umax of the CPU time, the
 * runtime accounting rule is modified as
 * "dq = -Uact / Umax dt"; since rq->dl.bw_ratio contains
 * 2^RATIO_SHIFT / Umax, delta is multiplied by bw_ratio and shifted
 * right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
u64 grub_reclaim(u64 delta, struct rq *rq)
{
        delta *= rq->dl.running_bw;
        delta *= rq->dl.bw_ratio;
        delta >>= BW_SHIFT + RATIO_SHIFT;

        return delta;
}
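
/*
 * A worked example of the fixed-point arithmetic in grub_reclaim()
 * above (values are purely illustrative): with Uact = 0.5,
 * running_bw = 0.5 * 2^BW_SHIFT = 524288, and with Umax = 1,
 * bw_ratio = 2^RATIO_SHIFT = 256. For a measured delta of 1000000 ns:
 * (1000000 * 524288 * 256) >> (BW_SHIFT + RATIO_SHIFT) = 500000 ns,
 * so only half of the consumed wall-clock time is charged against the
 * task's runtime and the other half is reclaimed.
 */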

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
        u64 delta_exec;

        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;

        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
         * in hardirq context, etc.). Deadlines are instead
         * computed using hard walltime. This seems to be the more
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
                return;
        }

        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq_clock_task(rq);
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
                delta_exec = grub_reclaim(delta_exec, rq);
        dl_se->runtime -= delta_exec;

throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;
                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

                if (!is_leftmost(curr, &rq->dl))
                        resched_curr(rq);
        }

        /*
         * Because -- for now -- we share the rt bandwidth, we need to
         * account our runtime there too, otherwise actual rt tasks
         * would be able to exceed the shared quota.
         *
         * Account to the root rt group for now.
         *
         * The solution we're working towards is having the RT groups scheduled
         * using deadline servers -- however there's a few nasties to figure
         * out before that can happen.
         */
        if (rt_bandwidth_enabled()) {
                struct rt_rq *rt_rq = &rq->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
                 * have our own CBS to keep us inline; only account when RT
                 * bandwidth is relevant.
                 */
                if (sched_rt_bandwidth_account(rt_rq))
                        rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
}

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     inactive_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        if (!dl_task(p) || p->state == TASK_DEAD) {
                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
                        sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
                        dl_se->dl_non_contending = 0;
                }

                raw_spin_lock(&dl_b->lock);
                __dl_clear(dl_b, p->dl.dl_bw);
                raw_spin_unlock(&dl_b->lock);
                __dl_clear_params(p);

                goto unlock;
        }
        if (dl_se->dl_non_contending == 0)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);

        sub_running_bw(dl_se->dl_bw, &rq->dl);
        dl_se->dl_non_contending = 0;
unlock:
        task_rq_unlock(rq, p, &rf);
        put_task_struct(p);

        return HRTIMER_NORESTART;
}

void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->inactive_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        timer->function = inactive_task_timer;
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_rq->earliest_dl.curr == 0 ||
            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
                dl_rq->earliest_dl.curr = deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
        }
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * Since we may have removed our earliest (and/or next earliest)
         * task we must recompute them.
         */
        if (!dl_rq->dl_nr_running) {
                dl_rq->earliest_dl.curr = 0;
                dl_rq->earliest_dl.next = 0;
                cpudl_clear(&rq->rd->cpudl, rq->cpu);
        } else {
                struct rb_node *leftmost = dl_rq->rb_leftmost;
                struct sched_dl_entity *entry;

                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
                dl_rq->earliest_dl.curr = entry->deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
        }
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;
        u64 deadline = dl_se->deadline;

        WARN_ON(!dl_prio(prio));
        dl_rq->dl_nr_running++;
        add_nr_running(rq_of_dl_rq(dl_rq), 1);

        inc_dl_deadline(dl_rq, deadline);
        inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;

        WARN_ON(!dl_prio(prio));
        WARN_ON(!dl_rq->dl_nr_running);
        dl_rq->dl_nr_running--;
        sub_nr_running(rq_of_dl_rq(dl_rq), 1);

        dec_dl_deadline(dl_rq, dl_se->deadline);
        dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rb_node **link = &dl_rq->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sched_dl_entity *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_dl_entity, rb_node);
                if (dl_time_before(dl_se->deadline, entry->deadline))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                dl_rq->rb_leftmost = &dl_se->rb_node;

        rb_link_node(&dl_se->rb_node, parent, link);
        rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

        inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        if (RB_EMPTY_NODE(&dl_se->rb_node))
                return;

        if (dl_rq->rb_leftmost == &dl_se->rb_node) {
                struct rb_node *next_node;

                next_node = rb_next(&dl_se->rb_node);
                dl_rq->rb_leftmost = next_node;
        }

        rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
        RB_CLEAR_NODE(&dl_se->rb_node);

        dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
                  struct sched_dl_entity *pi_se, int flags)
{
        BUG_ON(on_dl_rq(dl_se));

        /*
         * If this is a wakeup or a new instance, the scheduling
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
        if (flags & ENQUEUE_WAKEUP) {
                task_contending(dl_se);
                update_dl_entity(dl_se, pi_se);
        } else if (flags & ENQUEUE_REPLENISH) {
                replenish_dl_entity(dl_se, pi_se);
        }

        __enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        __dequeue_dl_entity(dl_se);
}

static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
{
        return dl_se->dl_deadline < dl_se->dl_period;
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        struct task_struct *pi_task = rt_mutex_get_top_task(p);
        struct sched_dl_entity *pi_se = &p->dl;

        /*
         * Use the scheduling parameters of the top pi-waiter
         * task if we have one and its (absolute) deadline is
         * smaller than our one... OTW we keep our runtime and
         * deadline.
         */
        if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
                pi_se = &pi_task->dl;
        } else if (!dl_prio(p->normal_prio)) {
                /*
                 * Special case in which we have a !SCHED_DEADLINE task
                 * that is going to be deboosted, but exceeds its
                 * runtime while doing so. No point in replenishing
                 * it, as it's going to return back to its original
                 * scheduling class after this.
                 */
                BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
                return;
        }

        /*
         * Check if a constrained deadline task was activated
         * after the deadline but before the next period.
         * If that is the case, the task will be throttled and
         * the replenishment timer will be set to the next period.
         */
        if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
                dl_check_constrained_dl(&p->dl);

        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE)
                add_running_bw(p->dl.dl_bw, &rq->dl);

        /*
         * If p is throttled, we do not enqueue it. In fact, if it exhausted
         * its budget it needs a replenishment and, since it now is on
         * its rq, the bandwidth timer callback (which clearly has not
         * run yet) will take care of this.
         * However, the active utilization does not depend on the fact
         * that the task is on the runqueue or not (but depends on the
         * task's state - in GRUB parlance, "inactive" vs "active contending").
         * In other words, even if a task is throttled its utilization must
         * be counted in the active utilization; hence, we need to call
         * add_running_bw().
         */
        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
                if (flags & ENQUEUE_WAKEUP)
                        task_contending(&p->dl);

                return;
        }

        enqueue_dl_entity(&p->dl, pi_se, flags);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        dequeue_dl_entity(&p->dl);
        dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        update_curr_dl(rq);
        __dequeue_task_dl(rq, p, flags);

        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE)
                sub_running_bw(p->dl.dl_bw, &rq->dl);

        /*
         * This check allows us to start the inactive timer (or to immediately
         * decrease the active utilization, if needed) in two cases:
         * when the task blocks and when it is terminating
         * (p->state == TASK_DEAD). We can handle the two cases in the same
         * way, because from GRUB's point of view the same thing is happening
         * (the task moves from "active contending" to "active non contending"
         * or "inactive").
         */
        if (flags & DEQUEUE_SLEEP)
                task_non_contending(p);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use it.
 */
static void yield_task_dl(struct rq *rq)
{
        /*
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
         * new scheduling parameters (thanks to dl_yielded=1).
         */
        rq->curr->dl.dl_yielded = 1;

        update_rq_clock(rq);
        update_curr_dl(rq);
        /*
         * Tell update_rq_clock() that we've just updated,
         * so we don't do microscopic update in schedule()
         * and double the fastpath cost.
         */
        rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;

        if (sd_flag != SD_BALANCE_WAKE)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = READ_ONCE(rq->curr); /* unlocked access */

        /*
         * If we are dealing with a -deadline task, we must
         * decide where to wake it up.
         * If it has a later deadline and the current task
         * on this rq can't move (provided the waking task
         * can!) we prefer to send it somewhere else. On the
         * other hand, if it has a shorter deadline, we
         * try to make it stay here, it might be important.
         */
        if (unlikely(dl_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             !dl_entity_preempt(&p->dl, &curr->dl)) &&
            (p->nr_cpus_allowed > 1)) {
                int target = find_later_rq(p);

                if (target != -1 &&
                    (dl_time_before(p->dl.deadline,
                                    cpu_rq(target)->dl.earliest_dl.curr) ||
                     (cpu_rq(target)->dl.dl_nr_running == 0)))
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

Luca Abeni209a0cb2017-05-18 22:13:29 +02001393static void migrate_task_rq_dl(struct task_struct *p)
1394{
1395 struct rq *rq;
1396
1397 if (!(p->state == TASK_WAKING) || !(p->dl.dl_non_contending))
1398 return;
1399
1400 rq = task_rq(p);
1401 /*
1402 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1403 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1404 * rq->lock is not... So, lock it
1405 */
1406 raw_spin_lock(&rq->lock);
1407 sub_running_bw(p->dl.dl_bw, &rq->dl);
1408 p->dl.dl_non_contending = 0;
1409 /*
1410 * If the timer handler is currently running and the
1411 * timer cannot be cancelled, inactive_task_timer()
1412	 * will see that dl_non_contending is not set, and
1413 * will not touch the rq's active utilization,
1414 * so we are still safe.
1415 */
1416 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1417 put_task_struct(p);
1418
1419 raw_spin_unlock(&rq->lock);
1420}
1421
Juri Lelli1baca4c2013-11-07 14:43:38 +01001422static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1423{
1424 /*
1425	 * Current can't be migrated, so it is useless to reschedule;
1426 * let's hope p can move out.
1427 */
Ingo Molnar4b53a342017-02-05 15:41:03 +01001428 if (rq->curr->nr_cpus_allowed == 1 ||
Juri Lelli6bfd6d72013-11-07 14:43:47 +01001429 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
Juri Lelli1baca4c2013-11-07 14:43:38 +01001430 return;
1431
1432 /*
1433 * p is migratable, so let's not schedule it and
1434 * see if it is pushed or pulled somewhere else.
1435 */
Ingo Molnar4b53a342017-02-05 15:41:03 +01001436 if (p->nr_cpus_allowed != 1 &&
Juri Lelli6bfd6d72013-11-07 14:43:47 +01001437 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
Juri Lelli1baca4c2013-11-07 14:43:38 +01001438 return;
1439
Kirill Tkhai88751252014-06-29 00:03:57 +04001440 resched_curr(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001441}
1442
1443#endif /* CONFIG_SMP */
1444
Dario Faggioliaab03e02013-11-28 11:14:43 +01001445/*
1446 * Only called when both the current and waking task are -deadline
1447 * tasks.
1448 */
1449static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1450 int flags)
1451{
Juri Lelli1baca4c2013-11-07 14:43:38 +01001452 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
Kirill Tkhai88751252014-06-29 00:03:57 +04001453 resched_curr(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001454 return;
1455 }
1456
1457#ifdef CONFIG_SMP
1458 /*
1459	 * In the unlikely case current and p have the same deadline,
1460 * let us try to decide what's the best thing to do...
1461 */
Dario Faggioli332ac172013-11-07 14:43:45 +01001462 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1463 !test_tsk_need_resched(rq->curr))
Juri Lelli1baca4c2013-11-07 14:43:38 +01001464 check_preempt_equal_dl(rq, p);
1465#endif /* CONFIG_SMP */
Dario Faggioliaab03e02013-11-28 11:14:43 +01001466}
1467
1468#ifdef CONFIG_SCHED_HRTICK
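/*
 * Arm the high-resolution tick to fire once the currently remaining
 * runtime of @p is expected to be consumed, so throttling happens with
 * better than tick granularity.
 */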
1469static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1470{
xiaofeng.yan177ef2a2014-08-26 03:15:41 +00001471 hrtick_start(rq, p->dl.runtime);
Dario Faggioliaab03e02013-11-28 11:14:43 +01001472}
Wanpeng Li36ce9882014-11-11 09:52:26 +08001473#else /* !CONFIG_SCHED_HRTICK */
1474static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1475{
1476}
Dario Faggioliaab03e02013-11-28 11:14:43 +01001477#endif
1478
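/*
 * The leftmost node of the -deadline rb-tree is, by construction, the
 * entity with the earliest deadline: that is what EDF must pick next.
 */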
1479static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1480 struct dl_rq *dl_rq)
1481{
1482 struct rb_node *left = dl_rq->rb_leftmost;
1483
1484 if (!left)
1485 return NULL;
1486
1487 return rb_entry(left, struct sched_dl_entity, rb_node);
1488}
1489
Peter Zijlstrae7904a22015-08-01 19:25:08 +02001490struct task_struct *
Matt Flemingd8ac8972016-09-21 14:38:10 +01001491pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
Dario Faggioliaab03e02013-11-28 11:14:43 +01001492{
1493 struct sched_dl_entity *dl_se;
1494 struct task_struct *p;
1495 struct dl_rq *dl_rq;
1496
1497 dl_rq = &rq->dl;
1498
Kirill Tkhaia1d9a322014-04-10 17:38:36 +04001499 if (need_pull_dl_task(rq, prev)) {
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001500 /*
1501		 * This is OK, because current is on_cpu, which avoids it being
1502		 * picked for load-balance; preemption/IRQs are still disabled,
1503		 * avoiding further scheduler activity on it; and we're being
1504		 * very careful to re-start the picking loop.
1505 */
Matt Flemingd8ac8972016-09-21 14:38:10 +01001506 rq_unpin_lock(rq, rf);
Peter Zijlstra38033c32014-01-23 20:32:21 +01001507 pull_dl_task(rq);
Matt Flemingd8ac8972016-09-21 14:38:10 +01001508 rq_repin_lock(rq, rf);
Kirill Tkhaia1d9a322014-04-10 17:38:36 +04001509 /*
T.Zhou176cedc2016-11-23 08:48:32 +08001510 * pull_dl_task() can drop (and re-acquire) rq->lock; this
Kirill Tkhaia1d9a322014-04-10 17:38:36 +04001511 * means a stop task can slip in, in which case we need to
1512 * re-start task selection.
1513 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001514 if (rq->stop && task_on_rq_queued(rq->stop))
Kirill Tkhaia1d9a322014-04-10 17:38:36 +04001515 return RETRY_TASK;
1516 }
1517
Kirill Tkhai734ff2a2014-03-04 19:25:46 +04001518 /*
1519 * When prev is DL, we may throttle it in put_prev_task().
1520 * So, we update time before we check for dl_nr_running.
1521 */
1522 if (prev->sched_class == &dl_sched_class)
1523 update_curr_dl(rq);
Peter Zijlstra38033c32014-01-23 20:32:21 +01001524
Dario Faggioliaab03e02013-11-28 11:14:43 +01001525 if (unlikely(!dl_rq->dl_nr_running))
1526 return NULL;
1527
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01001528 put_prev_task(rq, prev);
Peter Zijlstra606dba22012-02-11 06:05:00 +01001529
Dario Faggioliaab03e02013-11-28 11:14:43 +01001530 dl_se = pick_next_dl_entity(rq, dl_rq);
1531 BUG_ON(!dl_se);
1532
1533 p = dl_task_of(dl_se);
1534 p->se.exec_start = rq_clock_task(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001535
1536 /* Running task will never be pushed. */
Juri Lelli71362652014-01-14 12:03:51 +01001537 dequeue_pushable_dl_task(rq, p);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001538
Dario Faggioliaab03e02013-11-28 11:14:43 +01001539 if (hrtick_enabled(rq))
1540 start_hrtick_dl(rq, p);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001541
Peter Zijlstrae3fca9e2015-06-11 14:46:37 +02001542 queue_push_tasks(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001543
Dario Faggioliaab03e02013-11-28 11:14:43 +01001544 return p;
1545}
1546
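/*
 * @p is being taken off the CPU: account the runtime it has consumed
 * and, if it is still queued and can migrate, make it visible again
 * to the push logic.
 */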
1547static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1548{
1549 update_curr_dl(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001550
Ingo Molnar4b53a342017-02-05 15:41:03 +01001551 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
Juri Lelli1baca4c2013-11-07 14:43:38 +01001552 enqueue_pushable_dl_task(rq, p);
Dario Faggioliaab03e02013-11-28 11:14:43 +01001553}
1554
1555static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1556{
1557 update_curr_dl(rq);
1558
Wanpeng Lia7bebf42014-11-26 08:44:01 +08001559 /*
1560 * Even when we have runtime, update_curr_dl() might have resulted in us
1561 * not being the leftmost task anymore. In that case NEED_RESCHED will
1562 * be set and schedule() will start a new hrtick for the next task.
1563 */
1564 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1565 is_leftmost(p, &rq->dl))
Dario Faggioliaab03e02013-11-28 11:14:43 +01001566 start_hrtick_dl(rq, p);
Dario Faggioliaab03e02013-11-28 11:14:43 +01001567}
1568
1569static void task_fork_dl(struct task_struct *p)
1570{
1571 /*
1572	 * SCHED_DEADLINE tasks cannot fork; this is enforced by
1573	 * sched_fork().
1574 */
1575}
1576
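/*
 * @p has just become the running task on this rq (e.g. after a policy
 * change): start accounting its execution from now and take it out of
 * the pushable tasks tree, since the running task cannot be pushed.
 */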
Dario Faggioliaab03e02013-11-28 11:14:43 +01001577static void set_curr_task_dl(struct rq *rq)
1578{
1579 struct task_struct *p = rq->curr;
1580
1581 p->se.exec_start = rq_clock_task(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001582
1583 /* You can't push away the running task */
1584 dequeue_pushable_dl_task(rq, p);
Dario Faggioliaab03e02013-11-28 11:14:43 +01001585}
1586
Juri Lelli1baca4c2013-11-07 14:43:38 +01001587#ifdef CONFIG_SMP
1588
1589/* Only try algorithms three times */
1590#define DL_MAX_TRIES 3
1591
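/*
 * @p is a push/pull candidate for @cpu only if it is not currently
 * running and @cpu is in its affinity mask.
 */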
1592static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1593{
1594 if (!task_running(rq, p) &&
Ingo Molnar0c98d342017-02-05 15:38:10 +01001595 cpumask_test_cpu(cpu, &p->cpus_allowed))
Juri Lelli1baca4c2013-11-07 14:43:38 +01001596 return 1;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001597 return 0;
1598}
1599
Wanpeng Li8b5e7702015-05-13 14:01:01 +08001600/*
1601 * Return the earliest pushable task on this rq that is suitable to be
1602 * executed on the given CPU, or NULL if there is none:
1603 */
1604static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1605{
1606 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1607 struct task_struct *p = NULL;
1608
1609 if (!has_pushable_dl_tasks(rq))
1610 return NULL;
1611
1612next_node:
1613 if (next_node) {
1614 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1615
1616 if (pick_dl_task(rq, p, cpu))
1617 return p;
1618
1619 next_node = rb_next(next_node);
1620 goto next_node;
1621 }
1622
1623 return NULL;
1624}
1625
Juri Lelli1baca4c2013-11-07 14:43:38 +01001626static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1627
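/*
 * Find a CPU that @task is allowed to run on and whose earliest
 * -deadline task (if any) has a later deadline than @task, preferring
 * CPUs that are cache-hot or topologically close; returns -1 if there
 * is none.
 */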
1628static int find_later_rq(struct task_struct *task)
1629{
1630 struct sched_domain *sd;
Christoph Lameter4ba29682014-08-26 19:12:21 -05001631 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001632 int this_cpu = smp_processor_id();
1633 int best_cpu, cpu = task_cpu(task);
1634
1635 /* Make sure the mask is initialized first */
1636 if (unlikely(!later_mask))
1637 return -1;
1638
Ingo Molnar4b53a342017-02-05 15:41:03 +01001639 if (task->nr_cpus_allowed == 1)
Juri Lelli1baca4c2013-11-07 14:43:38 +01001640 return -1;
1641
Juri Lelli91ec6772014-09-19 10:22:41 +01001642 /*
1643 * We have to consider system topology and task affinity
1644 * first, then we can look for a suitable cpu.
1645 */
Juri Lelli6bfd6d72013-11-07 14:43:47 +01001646 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1647 task, later_mask);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001648 if (best_cpu == -1)
1649 return -1;
1650
1651 /*
1652 * If we are here, some target has been found,
1653 * the most suitable of which is cached in best_cpu.
1654	 * That is, among the runqueues whose current tasks have
1655	 * a later deadline than the task's, best_cpu is the one
1656	 * with the latest such deadline.
1657 *
1658 * Now we check how well this matches with task's
1659 * affinity and system topology.
1660 *
1661	 * The last cpu where the task ran is our first
1662 * guess, since it is most likely cache-hot there.
1663 */
1664 if (cpumask_test_cpu(cpu, later_mask))
1665 return cpu;
1666 /*
1667 * Check if this_cpu is to be skipped (i.e., it is
1668 * not in the mask) or not.
1669 */
1670 if (!cpumask_test_cpu(this_cpu, later_mask))
1671 this_cpu = -1;
1672
1673 rcu_read_lock();
1674 for_each_domain(cpu, sd) {
1675 if (sd->flags & SD_WAKE_AFFINE) {
1676
1677 /*
1678 * If possible, preempting this_cpu is
1679 * cheaper than migrating.
1680 */
1681 if (this_cpu != -1 &&
1682 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1683 rcu_read_unlock();
1684 return this_cpu;
1685 }
1686
1687 /*
1688 * Last chance: if best_cpu is valid and is
1689 * in the mask, that becomes our choice.
1690 */
1691 if (best_cpu < nr_cpu_ids &&
1692 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1693 rcu_read_unlock();
1694 return best_cpu;
1695 }
1696 }
1697 }
1698 rcu_read_unlock();
1699
1700 /*
1701	 * At this point, all our guesses failed; we just return
1702	 * 'something' and let the caller sort things out.
1703 */
1704 if (this_cpu != -1)
1705 return this_cpu;
1706
1707 cpu = cpumask_any(later_mask);
1708 if (cpu < nr_cpu_ids)
1709 return cpu;
1710
1711 return -1;
1712}
1713
1714/* Locks the rq it finds */
1715static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1716{
1717 struct rq *later_rq = NULL;
1718 int tries;
1719 int cpu;
1720
1721 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1722 cpu = find_later_rq(task);
1723
1724 if ((cpu == -1) || (cpu == rq->cpu))
1725 break;
1726
1727 later_rq = cpu_rq(cpu);
1728
Luca Abeni5aa50502015-10-16 10:06:21 +02001729 if (later_rq->dl.dl_nr_running &&
1730 !dl_time_before(task->dl.deadline,
Wanpeng Li9d514262015-05-13 14:01:03 +08001731 later_rq->dl.earliest_dl.curr)) {
1732 /*
1733	 * Target rq has tasks of equal or earlier deadline;
1734 * retrying does not release any lock and is unlikely
1735 * to yield a different result.
1736 */
1737 later_rq = NULL;
1738 break;
1739 }
1740
Juri Lelli1baca4c2013-11-07 14:43:38 +01001741 /* Retry if something changed. */
1742 if (double_lock_balance(rq, later_rq)) {
1743 if (unlikely(task_rq(task) != rq ||
Ingo Molnar0c98d342017-02-05 15:38:10 +01001744 !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001745 task_running(rq, task) ||
Xunlei Pang13b5ab02016-05-09 12:11:31 +08001746 !dl_task(task) ||
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001747 !task_on_rq_queued(task))) {
Juri Lelli1baca4c2013-11-07 14:43:38 +01001748 double_unlock_balance(rq, later_rq);
1749 later_rq = NULL;
1750 break;
1751 }
1752 }
1753
1754 /*
1755 * If the rq we found has no -deadline task, or
1756 * its earliest one has a later deadline than our
1757 * task, the rq is a good one.
1758 */
1759 if (!later_rq->dl.dl_nr_running ||
1760 dl_time_before(task->dl.deadline,
1761 later_rq->dl.earliest_dl.curr))
1762 break;
1763
1764 /* Otherwise we try again. */
1765 double_unlock_balance(rq, later_rq);
1766 later_rq = NULL;
1767 }
1768
1769 return later_rq;
1770}
1771
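/*
 * Return the earliest-deadline task on this rq that is queued,
 * migratable and not currently running (i.e. the leftmost entry of
 * the pushable tasks rb-tree), or NULL if there is none.
 */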
1772static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1773{
1774 struct task_struct *p;
1775
1776 if (!has_pushable_dl_tasks(rq))
1777 return NULL;
1778
1779 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1780 struct task_struct, pushable_dl_tasks);
1781
1782 BUG_ON(rq->cpu != task_cpu(p));
1783 BUG_ON(task_current(rq, p));
Ingo Molnar4b53a342017-02-05 15:41:03 +01001784 BUG_ON(p->nr_cpus_allowed <= 1);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001785
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001786 BUG_ON(!task_on_rq_queued(p));
Juri Lelli1baca4c2013-11-07 14:43:38 +01001787 BUG_ON(!dl_task(p));
1788
1789 return p;
1790}
1791
1792/*
1793 * See if the non-running -deadline tasks on this rq
1794 * can be sent to some other CPU where they can preempt
1795 * and start executing.
1796 */
1797static int push_dl_task(struct rq *rq)
1798{
1799 struct task_struct *next_task;
1800 struct rq *later_rq;
Wanpeng Lic51b8ab2014-11-06 15:22:44 +08001801 int ret = 0;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001802
1803 if (!rq->dl.overloaded)
1804 return 0;
1805
1806 next_task = pick_next_pushable_dl_task(rq);
1807 if (!next_task)
1808 return 0;
1809
1810retry:
1811 if (unlikely(next_task == rq->curr)) {
1812 WARN_ON(1);
1813 return 0;
1814 }
1815
1816 /*
1817 * If next_task preempts rq->curr, and rq->curr
1818 * can move away, it makes sense to just reschedule
1819 * without going further in pushing next_task.
1820 */
1821 if (dl_task(rq->curr) &&
1822 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
Ingo Molnar4b53a342017-02-05 15:41:03 +01001823 rq->curr->nr_cpus_allowed > 1) {
Kirill Tkhai88751252014-06-29 00:03:57 +04001824 resched_curr(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001825 return 0;
1826 }
1827
1828 /* We might release rq lock */
1829 get_task_struct(next_task);
1830
1831 /* Will lock the rq it'll find */
1832 later_rq = find_lock_later_rq(next_task, rq);
1833 if (!later_rq) {
1834 struct task_struct *task;
1835
1836 /*
1837 * We must check all this again, since
1838 * find_lock_later_rq releases rq->lock and it is
1839 * then possible that next_task has migrated.
1840 */
1841 task = pick_next_pushable_dl_task(rq);
Byungchul Parka776b962017-05-12 10:05:59 +09001842 if (task == next_task) {
Juri Lelli1baca4c2013-11-07 14:43:38 +01001843 /*
1844 * The task is still there. We don't try
1845	 * again; some other cpu will pull it when ready.
1846 */
Juri Lelli1baca4c2013-11-07 14:43:38 +01001847 goto out;
1848 }
1849
1850 if (!task)
1851 /* No more tasks */
1852 goto out;
1853
1854 put_task_struct(next_task);
1855 next_task = task;
1856 goto retry;
1857 }
1858
1859 deactivate_task(rq, next_task, 0);
Luca Abenie36d8672017-05-18 22:13:28 +02001860 sub_running_bw(next_task->dl.dl_bw, &rq->dl);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001861 set_task_cpu(next_task, later_rq->cpu);
Luca Abenie36d8672017-05-18 22:13:28 +02001862 add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001863 activate_task(later_rq, next_task, 0);
Wanpeng Lic51b8ab2014-11-06 15:22:44 +08001864 ret = 1;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001865
Kirill Tkhai88751252014-06-29 00:03:57 +04001866 resched_curr(later_rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001867
1868 double_unlock_balance(rq, later_rq);
1869
1870out:
1871 put_task_struct(next_task);
1872
Wanpeng Lic51b8ab2014-11-06 15:22:44 +08001873 return ret;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001874}
1875
1876static void push_dl_tasks(struct rq *rq)
1877{
Andrea Parri4ffa08e2015-08-05 15:56:18 +02001878 /* push_dl_task() will return true if it moved a -deadline task */
Juri Lelli1baca4c2013-11-07 14:43:38 +01001879 while (push_dl_task(rq))
1880 ;
1881}
1882
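/*
 * Walk the runqueues flagged as -deadline overloaded and, where one of
 * them holds a queued, migratable task that would preempt what is
 * running here, pull the earliest such task onto this_rq.
 */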
Peter Zijlstra0ea60c22015-06-11 14:46:42 +02001883static void pull_dl_task(struct rq *this_rq)
Juri Lelli1baca4c2013-11-07 14:43:38 +01001884{
Peter Zijlstra0ea60c22015-06-11 14:46:42 +02001885 int this_cpu = this_rq->cpu, cpu;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001886 struct task_struct *p;
Peter Zijlstra0ea60c22015-06-11 14:46:42 +02001887 bool resched = false;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001888 struct rq *src_rq;
1889 u64 dmin = LONG_MAX;
1890
1891 if (likely(!dl_overloaded(this_rq)))
Peter Zijlstra0ea60c22015-06-11 14:46:42 +02001892 return;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001893
1894 /*
1895 * Match the barrier from dl_set_overloaded; this guarantees that if we
1896 * see overloaded we must also see the dlo_mask bit.
1897 */
1898 smp_rmb();
1899
1900 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1901 if (this_cpu == cpu)
1902 continue;
1903
1904 src_rq = cpu_rq(cpu);
1905
1906 /*
1907	 * It looks racy, and it is! However, as in sched_rt.c,
1908 * we are fine with this.
1909 */
1910 if (this_rq->dl.dl_nr_running &&
1911 dl_time_before(this_rq->dl.earliest_dl.curr,
1912 src_rq->dl.earliest_dl.next))
1913 continue;
1914
1915 /* Might drop this_rq->lock */
1916 double_lock_balance(this_rq, src_rq);
1917
1918 /*
1919 * If there are no more pullable tasks on the
1920 * rq, we're done with it.
1921 */
1922 if (src_rq->dl.dl_nr_running <= 1)
1923 goto skip;
1924
Wanpeng Li8b5e7702015-05-13 14:01:01 +08001925 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001926
1927 /*
1928 * We found a task to be pulled if:
1929 * - it preempts our current (if there's one),
1930 * - it will preempt the last one we pulled (if any).
1931 */
1932 if (p && dl_time_before(p->dl.deadline, dmin) &&
1933 (!this_rq->dl.dl_nr_running ||
1934 dl_time_before(p->dl.deadline,
1935 this_rq->dl.earliest_dl.curr))) {
1936 WARN_ON(p == src_rq->curr);
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001937 WARN_ON(!task_on_rq_queued(p));
Juri Lelli1baca4c2013-11-07 14:43:38 +01001938
1939 /*
1940 * Then we pull iff p has actually an earlier
1941 * deadline than the current task of its runqueue.
1942 */
1943 if (dl_time_before(p->dl.deadline,
1944 src_rq->curr->dl.deadline))
1945 goto skip;
1946
Peter Zijlstra0ea60c22015-06-11 14:46:42 +02001947 resched = true;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001948
1949 deactivate_task(src_rq, p, 0);
Luca Abenie36d8672017-05-18 22:13:28 +02001950 sub_running_bw(p->dl.dl_bw, &src_rq->dl);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001951 set_task_cpu(p, this_cpu);
Luca Abenie36d8672017-05-18 22:13:28 +02001952 add_running_bw(p->dl.dl_bw, &this_rq->dl);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001953 activate_task(this_rq, p, 0);
1954 dmin = p->dl.deadline;
1955
1956 /* Is there any other task even earlier? */
1957 }
1958skip:
1959 double_unlock_balance(this_rq, src_rq);
1960 }
1961
Peter Zijlstra0ea60c22015-06-11 14:46:42 +02001962 if (resched)
1963 resched_curr(this_rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001964}
1965
1966/*
1967 * Since the task is not running and a reschedule is not going to happen
1968 * anytime soon on its runqueue, we try pushing it away now.
1969 */
1970static void task_woken_dl(struct rq *rq, struct task_struct *p)
1971{
1972 if (!task_running(rq, p) &&
1973 !test_tsk_need_resched(rq->curr) &&
Ingo Molnar4b53a342017-02-05 15:41:03 +01001974 p->nr_cpus_allowed > 1 &&
Juri Lelli1baca4c2013-11-07 14:43:38 +01001975 dl_task(rq->curr) &&
Ingo Molnar4b53a342017-02-05 15:41:03 +01001976 (rq->curr->nr_cpus_allowed < 2 ||
Wanpeng Li6b0a5632014-10-31 06:39:34 +08001977 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
Juri Lelli1baca4c2013-11-07 14:43:38 +01001978 push_dl_tasks(rq);
1979 }
1980}
1981
1982static void set_cpus_allowed_dl(struct task_struct *p,
1983 const struct cpumask *new_mask)
1984{
Juri Lelli7f514122014-09-19 10:22:40 +01001985 struct root_domain *src_rd;
Peter Zijlstra6c370672015-05-15 17:43:36 +02001986 struct rq *rq;
Juri Lelli1baca4c2013-11-07 14:43:38 +01001987
1988 BUG_ON(!dl_task(p));
1989
Juri Lelli7f514122014-09-19 10:22:40 +01001990 rq = task_rq(p);
1991 src_rd = rq->rd;
1992 /*
1993 * Migrating a SCHED_DEADLINE task between exclusive
1994 * cpusets (different root_domains) entails a bandwidth
1995 * update. We already made space for us in the destination
1996 * domain (see cpuset_can_attach()).
1997 */
1998 if (!cpumask_intersects(src_rd->span, new_mask)) {
1999 struct dl_bw *src_dl_b;
2000
2001 src_dl_b = dl_bw_of(cpu_of(rq));
2002 /*
2003 * We now free resources of the root_domain we are migrating
2004	 * off. In the worst case, sched_setattr() may temporarily fail
2005 * until we complete the update.
2006 */
2007 raw_spin_lock(&src_dl_b->lock);
2008 __dl_clear(src_dl_b, p->dl.dl_bw);
2009 raw_spin_unlock(&src_dl_b->lock);
2010 }
2011
Peter Zijlstra6c370672015-05-15 17:43:36 +02002012 set_cpus_allowed_common(p, new_mask);
Juri Lelli1baca4c2013-11-07 14:43:38 +01002013}
2014
2015/* Assumes rq->lock is held */
2016static void rq_online_dl(struct rq *rq)
2017{
2018 if (rq->dl.overloaded)
2019 dl_set_overload(rq);
Juri Lelli6bfd6d72013-11-07 14:43:47 +01002020
Xunlei Pang16b26942015-01-19 04:49:36 +00002021 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
Juri Lelli6bfd6d72013-11-07 14:43:47 +01002022 if (rq->dl.dl_nr_running > 0)
Tommaso Cucinottad8206bb2016-08-14 16:27:08 +02002023 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
Juri Lelli1baca4c2013-11-07 14:43:38 +01002024}
2025
2026/* Assumes rq->lock is held */
2027static void rq_offline_dl(struct rq *rq)
2028{
2029 if (rq->dl.overloaded)
2030 dl_clear_overload(rq);
Juri Lelli6bfd6d72013-11-07 14:43:47 +01002031
Tommaso Cucinottad8206bb2016-08-14 16:27:08 +02002032 cpudl_clear(&rq->rd->cpudl, rq->cpu);
Xunlei Pang16b26942015-01-19 04:49:36 +00002033 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
Juri Lelli1baca4c2013-11-07 14:43:38 +01002034}
2035
Wanpeng Lia6c0e742015-05-13 14:01:02 +08002036void __init init_sched_dl_class(void)
Juri Lelli1baca4c2013-11-07 14:43:38 +01002037{
2038 unsigned int i;
2039
2040 for_each_possible_cpu(i)
2041 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2042 GFP_KERNEL, cpu_to_node(i));
2043}
2044
2045#endif /* CONFIG_SMP */
2046
Dario Faggioliaab03e02013-11-28 11:14:43 +01002047static void switched_from_dl(struct rq *rq, struct task_struct *p)
2048{
Peter Zijlstraa649f232015-06-11 14:46:49 +02002049 /*
Luca Abeni209a0cb2017-05-18 22:13:29 +02002050 * task_non_contending() can start the "inactive timer" (if the 0-lag
2051 * time is in the future). If the task switches back to dl before
2052 * the "inactive timer" fires, it can continue to consume its current
2053 * runtime using its current deadline. If it stays outside of
2054 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2055 * will reset the task parameters.
Peter Zijlstraa649f232015-06-11 14:46:49 +02002056 */
Luca Abeni209a0cb2017-05-18 22:13:29 +02002057 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2058 task_non_contending(p);
2059
2060 /*
2061 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2062 * at the 0-lag time, because the task could have been migrated
2063	 * while it was SCHED_OTHER in the meanwhile.
2064 */
2065 if (p->dl.dl_non_contending)
2066 p->dl.dl_non_contending = 0;
Juri Lellia5e7be32014-09-19 10:22:39 +01002067
Juri Lelli1baca4c2013-11-07 14:43:38 +01002068 /*
2069 * Since this might be the only -deadline task on the rq,
2070 * this is the right place to try to pull some other one
2071 * from an overloaded cpu, if any.
2072 */
Wanpeng Licd660912014-10-31 06:39:35 +08002073 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2074 return;
2075
Peter Zijlstra9916e212015-06-11 14:46:43 +02002076 queue_pull_task(rq);
Dario Faggioliaab03e02013-11-28 11:14:43 +01002077}
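
/*
 * For reference, the "0-lag time" mentioned above is the instant at
 * which the remaining runtime, if consumed at the reserved rate
 * dl_runtime / dl_period, would reach zero; task_non_contending()
 * computes it, roughly, as:
 *
 *	zerolag = deadline - runtime * dl_period / dl_runtime
 */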
2078
Juri Lelli1baca4c2013-11-07 14:43:38 +01002079/*
2080 * When switching to -deadline, we may overload the rq, then
2081 * we try to push someone off, if possible.
2082 */
Dario Faggioliaab03e02013-11-28 11:14:43 +01002083static void switched_to_dl(struct rq *rq, struct task_struct *p)
2084{
Luca Abeni209a0cb2017-05-18 22:13:29 +02002085 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2086 put_task_struct(p);
Luca Abeni72f9f3f2016-03-07 12:27:04 +01002087
Juri Lelli98b0a852016-08-05 16:07:55 +01002088 /* If p is not queued we will update its parameters at next wakeup. */
2089 if (!task_on_rq_queued(p))
2090 return;
2091
2092 /*
2093	 * If p is boosted, we already updated its params in
2094	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH), so
2095	 * p's deadline is already after rq_clock(rq).
2096 */
2097 if (dl_time_before(p->dl.deadline, rq_clock(rq)))
2098 setup_new_dl_entity(&p->dl);
2099
2100 if (rq->curr != p) {
Juri Lelli1baca4c2013-11-07 14:43:38 +01002101#ifdef CONFIG_SMP
Ingo Molnar4b53a342017-02-05 15:41:03 +01002102 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
Peter Zijlstra9916e212015-06-11 14:46:43 +02002103 queue_push_tasks(rq);
Sebastian Andrzej Siewior619bd4a2017-01-24 15:40:06 +01002104#endif
Peter Zijlstra9916e212015-06-11 14:46:43 +02002105 if (dl_task(rq->curr))
2106 check_preempt_curr_dl(rq, p, 0);
2107 else
2108 resched_curr(rq);
Dario Faggioliaab03e02013-11-28 11:14:43 +01002109 }
2110}
2111
Juri Lelli1baca4c2013-11-07 14:43:38 +01002112/*
2113 * If the scheduling parameters of a -deadline task changed,
2114 * a push or pull operation might be needed.
2115 */
Dario Faggioliaab03e02013-11-28 11:14:43 +01002116static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2117 int oldprio)
2118{
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04002119 if (task_on_rq_queued(p) || rq->curr == p) {
Dario Faggioliaab03e02013-11-28 11:14:43 +01002120#ifdef CONFIG_SMP
Juri Lelli1baca4c2013-11-07 14:43:38 +01002121 /*
2122 * This might be too much, but unfortunately
2123 * we don't have the old deadline value, and
2124 * we can't argue if the task is increasing
2125 * or lowering its prio, so...
2126 */
2127 if (!rq->dl.overloaded)
Peter Zijlstra9916e212015-06-11 14:46:43 +02002128 queue_pull_task(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01002129
2130 /*
2131	 * If we now have an earlier deadline task than p,
2132 * then reschedule, provided p is still on this
2133 * runqueue.
2134 */
Peter Zijlstra9916e212015-06-11 14:46:43 +02002135 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
Kirill Tkhai88751252014-06-29 00:03:57 +04002136 resched_curr(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01002137#else
2138 /*
2139	 * Again, we don't know if p has an earlier
2140 * or later deadline, so let's blindly set a
2141 * (maybe not needed) rescheduling point.
2142 */
Kirill Tkhai88751252014-06-29 00:03:57 +04002143 resched_curr(rq);
Juri Lelli1baca4c2013-11-07 14:43:38 +01002144#endif /* CONFIG_SMP */
Peter Zijlstra801ccdb2016-02-25 15:01:49 +01002145 }
Dario Faggioliaab03e02013-11-28 11:14:43 +01002146}
Dario Faggioliaab03e02013-11-28 11:14:43 +01002147
2148const struct sched_class dl_sched_class = {
2149 .next = &rt_sched_class,
2150 .enqueue_task = enqueue_task_dl,
2151 .dequeue_task = dequeue_task_dl,
2152 .yield_task = yield_task_dl,
2153
2154 .check_preempt_curr = check_preempt_curr_dl,
2155
2156 .pick_next_task = pick_next_task_dl,
2157 .put_prev_task = put_prev_task_dl,
2158
2159#ifdef CONFIG_SMP
2160 .select_task_rq = select_task_rq_dl,
Luca Abeni209a0cb2017-05-18 22:13:29 +02002161 .migrate_task_rq = migrate_task_rq_dl,
Juri Lelli1baca4c2013-11-07 14:43:38 +01002162 .set_cpus_allowed = set_cpus_allowed_dl,
2163 .rq_online = rq_online_dl,
2164 .rq_offline = rq_offline_dl,
Juri Lelli1baca4c2013-11-07 14:43:38 +01002165 .task_woken = task_woken_dl,
Dario Faggioliaab03e02013-11-28 11:14:43 +01002166#endif
2167
2168 .set_curr_task = set_curr_task_dl,
2169 .task_tick = task_tick_dl,
2170 .task_fork = task_fork_dl,
Dario Faggioliaab03e02013-11-28 11:14:43 +01002171
2172 .prio_changed = prio_changed_dl,
2173 .switched_from = switched_from_dl,
2174 .switched_to = switched_to_dl,
Stanislaw Gruszka6e998912014-11-12 16:58:44 +01002175
2176 .update_curr = update_curr_dl,
Dario Faggioliaab03e02013-11-28 11:14:43 +01002177};
Wanpeng Liacb32132014-10-31 06:39:33 +08002178
2179#ifdef CONFIG_SCHED_DEBUG
2180extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2181
2182void print_dl_stats(struct seq_file *m, int cpu)
2183{
2184 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2185}
2186#endif /* CONFIG_SCHED_DEBUG */