/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
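
/*
 * Note that when @m is NULL (as in the sysrq path, where
 * sysrq_sched_debug_show() calls the printers with a NULL seq_file),
 * the output falls back to printk() and goes to the console.
 */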

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
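
/*
 * For example, a value of 3123456789 ns splits into nsec_high() == 3123
 * and nsec_low() == 456789, which the "%Ld.%06ld" formats below print
 * as "3123.456789", i.e. milliseconds with microsecond resolution.
 */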

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", cpu_to_node(task_cpu(p)));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}
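
/*
 * Example: with CPUs {0, 2, 3} online, successive iterations yield the
 * header token (1), then (void *)(cpu + 2) for cpu0, cpu2 and cpu3, and
 * finally NULL once cpumask_next() runs past the last online CPU.
 * sched_debug_show() undoes the +2 encoding to recover the cpu number,
 * with -1 selecting the header.
 */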

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
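
/*
 * The per-cpu dump registered above can be inspected at run time with
 * e.g. "cat /proc/sched_debug"; the same printers are reused with a
 * NULL seq_file by sysrq_sched_debug_show() so the output can also go
 * straight to the console.
 */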

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;
	int node, i;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));

	for_each_online_node(node) {
		for (i = 0; i < 2; i++) {
			unsigned long nr_faults = -1;
			int cpu_current, home_node;

			if (p->numa_faults)
				nr_faults = p->numa_faults[2*node + i];

			cpu_current = !i ? (task_node(p) == node) :
					   (pol && node_isset(node, pol->v.nodes));

			home_node = (p->numa_preferred_nid == node);

			SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n",
				   i, node, cpu_current, home_node, nr_faults);
		}
	}

	mpol_put(pol);
#endif
}
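
/*
 * proc_sched_show_task() below backs the per-task /proc/<pid>/sched
 * file: everything printed here is per-task state, in contrast to the
 * per-cpu view assembled by print_cpu() above.
 */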
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.runnable_avg_sum);
	P(se.avg.runnable_avg_period);
	P(se.avg.load_avg_contrib);
	P(se.avg.decay_count);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}
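
/*
 * Writing to /proc/<pid>/sched invokes proc_sched_set_task() below,
 * which resets the schedstats counters so a fresh measurement interval
 * can be started without restarting the task.
 */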
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}