// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
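
/*
 * Illustrative use: sysrq_sched_debug_show() below calls these print
 * helpers with m == NULL, so the same SEQ_printf(m, "cpu#%d\n", cpu)
 * that fills the seq_file for /proc/sched_debug falls back to
 * pr_cont() and lands on the console instead.
 */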

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
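
/*
 * Worked example: with x == 1234567890 (ns), nsec_high() returns 1234
 * and nsec_low() returns the remainder 567890, so a "%Ld.%06ld" format
 * fed with SPLIT_NS(x) prints "1234.567890" -- whole milliseconds with
 * six fractional digits.
 */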

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
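
/*
 * The include above is an X-macro expansion: each SCHED_FEAT(name,
 * enabled) line in features.h becomes a stringified array element,
 * e.g. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) contributes the entry
 * "GENTLE_FAIR_SLEEPERS".
 */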

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
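
/*
 * Example usage (assuming debugfs is mounted at /sys/kernel/debug; the
 * file itself is created in sched_init_debug() below):
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *	cat /sys/kernel/debug/sched_features
 */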

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static int sd_ctl_doflags(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long flags = *(unsigned long *)table->data;
	size_t data_size = 0;
	size_t len = 0;
	char *tmp, *ptr;
	int idx;

	if (write)
		return 0;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		/* Name plus whitespace */
		data_size += strlen(name) + 1;
	}

	if (*ppos > data_size) {
		*lenp = 0;
		return 0;
	}

	tmp = kcalloc(data_size + 1, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		len += snprintf(tmp + len, strlen(name) + 2, "%s ", name);
	}

	/*
	 * Use a separate cursor for the seek offset: advancing @tmp itself
	 * would hand a pointer past the start of the allocation to kfree()
	 * below whenever *ppos is non-zero.
	 */
	ptr = tmp + *ppos;
	len -= *ppos;

	if (len > *lenp)
		len = *lenp;
	if (len)
		memcpy(buffer, ptr, len);
	if (len < *lenp) {
		((char *)buffer)[len] = '\n';
		len++;
	}

	*lenp = len;
	*ppos += len;

	kfree(tmp);

	return 0;
}
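
/*
 * Reading the resulting "flags" file prints the symbolic names of the
 * flags set in the domain, e.g. something like:
 *
 *	SD_BALANCE_NEWIDLE SD_BALANCE_EXEC SD_BALANCE_FORK SD_WAKE_AFFINE
 *
 * with the exact set depending on the domain and on sd_flag_debug[],
 * which is generated from the SD flag definitions.
 */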

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor",         &sd->busy_factor,         sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct",       &sd->imbalance_pct,       sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags",               &sd->flags,               sizeof(int),  0444, sd_ctl_doflags);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name",                sd->name,            CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
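
/*
 * Together with register_sched_domain_sysctl() below, this builds the
 * per-CPU, per-domain hierarchy under /proc/sys/kernel/sched_domain/,
 * e.g. /proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct.
 */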

static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n",	#F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n",	#F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif
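
/*
 * task_group_path() yields e.g. "/autogroup-42" for an autogroup and
 * otherwise the cgroup path of the group, e.g. "/system.slice/foo.service"
 * (both examples illustrative).
 */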

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};
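
/*
 * Indexed by sysctl_sched_tunable_scaling in sched_debug_header() below;
 * the order matches the SCHED_TUNABLESCALING_{NONE,LOG,LINEAR} constants.
 */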

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
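
/*
 * Example of the mapping: *offset == 0 yields the header token (1);
 * *offset == 1 yields the first online CPU, encoded as cpu + 2 so that
 * sched_debug_show() can recover the CPU number via (v - 2).
 */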

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
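
/*
 * Example usage from userspace:
 *
 *	cat /proc/sched_debug
 *
 * prints the header followed by a dump of every online CPU's cfs, rt
 * and dl runqueues plus their runnable tasks.
 */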

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
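
/*
 * Illustrative expansion: PN(se.vruntime) becomes
 * __PSN("se.vruntime", p->se.vruntime), printing the field name
 * left-aligned in 45 columns and the value split by SPLIT_NS() into
 * whole milliseconds plus a 6-digit remainder.
 */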


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}