// SPDX-License-Identifier: GPL-2.0
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
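/*
 * Usage sketch (illustrative only, not part of this class): nothing calls
 * into this file directly; work reaches the stop task through the
 * stop_machine API. my_fn() below is a hypothetical callback, not a real
 * kernel symbol:
 *
 *	static int my_fn(void *arg)
 *	{
 *		return 0;		// runs with the CPU monopolized
 *	}
 *
 *	stop_one_cpu(cpu, my_fn, NULL);	// wakes that CPU's rq->stop
 */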
#include "sched.h"

#ifdef CONFIG_SMP
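/*
 * Each CPU has exactly one stop task, a kthread bound to that CPU for its
 * whole lifetime, so the only valid answer here is the task's current CPU.
 */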
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}

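/*
 * The ->balance() hook lets the core scheduler ask whether this class has
 * work before committing to a pick; for the stop class that is simply
 * whether rq->stop exists and is queued.
 */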
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return sched_stop_runnable(rq);
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

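/* Stamp the exec clock; put_prev_task_stop() will account against it. */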
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
	stop->se.exec_start = rq_clock_task(rq);
}

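/*
 * At most one stop task exists per rq, so picking is trivial: if it is
 * runnable it wins outright, with no queue walk or fairness involved.
 */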
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	if (!sched_stop_runnable(rq))
		return NULL;

	set_next_task_stop(rq, rq->stop, true);
	return rq->stop;
}

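/*
 * The stop task is never kept on a queue of its own; enqueue/dequeue only
 * maintain rq->nr_running so the core's bookkeeping stays correct.
 */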
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

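/*
 * Account the CPU time consumed since set_next_task_stop() stamped
 * ->exec_start; update_curr_stop() is a no-op, so this is where the stop
 * task's runtime bookkeeping actually happens.
 */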
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cgroup_account_cputime(curr, delta_exec);
}

/*
 * Scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along with full dynticks. Therefore no local assumption can be
 * made and everything must be accessed through the @rq and @curr passed
 * in as parameters.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,
	.set_next_task		= set_next_task_stop,

#ifdef CONFIG_SMP
	.balance		= balance_stop,
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};
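/*
 * Simplified sketch of how this class gets consulted (for illustration
 * only; see pick_next_task() in kernel/sched/core.c for the real thing):
 * the core walks the classes highest-priority first, so the stop class is
 * asked before deadline, RT, fair and idle:
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */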