// SPDX-License-Identifier: GPL-2.0
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest-priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
#include "sched.h"

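/*
 * A minimal usage sketch, for orientation only (my_stop_fn and data
 * are hypothetical; stop_one_cpu() is the real entry point declared
 * in <linux/stop_machine.h>):
 *
 *	static int my_stop_fn(void *arg)
 *	{
 *		return 0;	// runs on the target CPU, everything else preempted
 *	}
 *
 *	stop_one_cpu(cpu, my_stop_fn, &data);
 *
 * stop_one_cpu() wakes that CPU's stopper thread, which runs in this
 * scheduling class and therefore above every other task.
 */
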
#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}

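/*
 * The stop class keeps no run-queue of its own: balance() merely
 * reports whether this CPU's stop task is runnable, which is all the
 * core pick path needs to know about this class.
 */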
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return sched_stop_runnable(rq);
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

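/*
 * The stop task is becoming rq->curr: stamp exec_start so the next
 * runtime update measures from this point.
 */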
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
	stop->se.exec_start = rq_clock_task(rq);
}

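/*
 * There is at most one stop task per CPU (rq->stop), so picking it is
 * a simple runnable check rather than a queue scan.
 */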
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	if (!sched_stop_runnable(rq))
		return NULL;

	set_next_task_stop(rq, rq->stop, true);
	return rq->stop;
}

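/*
 * The stop task is never held on a per-class run-queue; enqueue and
 * dequeue only maintain the rq-wide nr_running bookkeeping.
 */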
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

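/*
 * The stop task is being switched out: account the CPU time it
 * consumed since exec_start, guarding against the task clock
 * appearing to have gone backwards.
 */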
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cgroup_account_cputime(curr, delta_exec);
}

/*
 * Scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along with full dynticks. Therefore no local assumption can be
 * made and everything must be accessed through the @rq and @curr passed
 * in parameters.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

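/* The stop task has no timeslice, so it has no round-robin interval. */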
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

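/* Runtime accounting for the stop task happens in put_prev_task_stop(). */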
static void update_curr_stop(struct rq *rq)
{
}

Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 115 | /* |
| 116 | * Simple, special scheduling class for the per-CPU stop tasks: |
| 117 | */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 118 | const struct sched_class stop_sched_class = { |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 119 | .next = &dl_sched_class, |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 120 | |
| 121 | .enqueue_task = enqueue_task_stop, |
| 122 | .dequeue_task = dequeue_task_stop, |
| 123 | .yield_task = yield_task_stop, |
| 124 | |
| 125 | .check_preempt_curr = check_preempt_curr_stop, |
| 126 | |
| 127 | .pick_next_task = pick_next_task_stop, |
| 128 | .put_prev_task = put_prev_task_stop, |
Peter Zijlstra | 03b7fad | 2019-05-29 20:36:41 +0000 | [diff] [blame] | 129 | .set_next_task = set_next_task_stop, |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 130 | |
| 131 | #ifdef CONFIG_SMP |
Peter Zijlstra | 6e2df05 | 2019-11-08 11:11:52 +0100 | [diff] [blame] | 132 | .balance = balance_stop, |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 133 | .select_task_rq = select_task_rq_stop, |
Peter Zijlstra | c5b2803 | 2015-05-15 17:43:35 +0200 | [diff] [blame] | 134 | .set_cpus_allowed = set_cpus_allowed_common, |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 135 | #endif |
| 136 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 137 | .task_tick = task_tick_stop, |
| 138 | |
| 139 | .get_rr_interval = get_rr_interval_stop, |
| 140 | |
| 141 | .prio_changed = prio_changed_stop, |
| 142 | .switched_to = switched_to_stop, |
Thomas Gleixner | 90e362f | 2014-11-23 23:04:52 +0100 | [diff] [blame] | 143 | .update_curr = update_curr_stop, |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 144 | }; |