/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
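
/*
 * Note (not from the original header): the per-rq counters updated above are
 * the ones show_schedstat() in kernel/sched/stats.c exposes via
 * /proc/schedstat; assuming that file's layout, the tail of each cpu line is
 * roughly:
 *
 *      cpuN ... <rq_cpu_time> <rq_sched_info.run_delay> <rq_sched_info.pcount>
 */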

#define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var) (var)
#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
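
/*
 * Illustrative sketch (not part of the original header): these wrappers let
 * call sites update statistics unconditionally in the source while only
 * doing work when the sched_schedstats static key is enabled at run time,
 * and compile away entirely when CONFIG_SCHEDSTATS is off (see the #else
 * branch below). Typical scheduler usage looks roughly like the following;
 * the field names are assumptions about the callers, shown only as examples:
 *
 *      schedstat_inc(rq->yld_count);
 *      schedstat_add(cfs_rq->exec_clock, delta_exec);
 *      schedstat_set(se->statistics.wait_start, rq_clock(rq));
 */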

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled() 0
#define schedstat_inc(var) do { } while (0)
#define schedstat_add(var, amt) do { } while (0)
#define schedstat_set(var, val) do { } while (0)
#define schedstat_val(var) 0
#define schedstat_val_or_zero(var) 0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; taking the delta on each cpu annuls the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}
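
/*
 * Worked example (illustrative, made-up numbers): a task enqueued with
 * last_queued = 1000 and dequeued here at now = 1300 adds 300 to run_delay.
 * Because both timestamps were read from the same rq clock, any constant
 * offset of that clock relative to other cpus cancels out of the delta.
 */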

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}
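
/*
 * Note (not from the original header): assuming the proc_pid_schedstat()
 * layout in fs/proc/base.c, the run_delay and pcount accumulated here are
 * reported as the second and third fields of /proc/<pid>/schedstat, after
 * se.sum_exec_runtime, e.g. (illustrative output):
 *
 *      $ cat /proc/self/schedstat
 *      12345678 90123 42
 */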

/*
 * This function is called from enqueue_task() (and from sched_info_depart()
 * below when a still-runnable task is switched out), but it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the currently running process,
 * typically involuntarily due to expiring its time slice (this may also
 * be called when switching to the idle task). Now we can calculate how
 * long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) -
                        t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched, typically involuntarily due to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
                    struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the cpu. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
                  struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}
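
/*
 * Illustrative call site (a sketch, not part of this header): the context
 * switch path in kernel/sched/core.c invokes this from prepare_task_switch(),
 * roughly:
 *
 *      static inline void
 *      prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *                          struct task_struct *next)
 *      {
 *              sched_info_switch(rq, prev, next);
 *              ...
 *      }
 *
 * and __schedule() only performs a context switch when prev != next, so each
 * real switch is accounted once.
 */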
#else
#define sched_info_queued(rq, t) do { } while (0)
#define sched_info_reset_dequeued(t) do { } while (0)
#define sched_info_dequeued(rq, t) do { } while (0)
#define sched_info_depart(rq, t) do { } while (0)
#define sched_info_arrive(rq, next) do { } while (0)
#define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */