// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
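
/*
 * Worked example (illustrative): for x = 1234567890 ns,
 * SEQ_printf(m, "%Ld.%06ld", SPLIT_NS(x)) prints "1234.567890":
 * nsec_high() yields the quotient in milliseconds (sign preserved),
 * nsec_low() the six-digit nanosecond remainder.
 */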

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
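
/*
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug
 * and e.g. GENTLE_FAIR_SLEEPERS is defined in features.h:
 *
 *   # cat /sys/kernel/debug/sched/features
 *   GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 *
 * A "NO_" prefix clears the feature bit, a bare name sets it; an
 * unknown name makes the write fail with the match_string() error.
 */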

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	/* NUL-terminate before parsing; the user buffer need not be */
	buf[cnt] = '\0';
	if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
		return -EINVAL;

	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* SMP */
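
/*
 * Illustrative usage: "tunable_scaling" takes the numeric index into
 * sched_tunable_scaling_names[] further down in this file:
 *
 *   # echo 0 > /sys/kernel/debug/sched/tunable_scaling    (none)
 *   # echo 1 > /sys/kernel/debug/sched/tunable_scaling    (logarithmic)
 *   # echo 2 > /sys/kernel/debug/sched/tunable_scaling    (linear)
 *
 * sched_update_scaling() then recomputes the tunables that depend on
 * the scaling mode.
 */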

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */
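
/*
 * Illustrative usage (CONFIG_PREEMPT_DYNAMIC=y):
 *
 *   # cat /sys/kernel/debug/sched/preempt
 *   none voluntary (full)
 *   # echo voluntary > /sys/kernel/debug/sched/preempt
 *
 * The parenthesized entry is the preemption model currently in effect.
 */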

__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);
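
/*
 * Resulting debugfs layout (abridged, config-dependent):
 *
 *   /sys/kernel/debug/sched/
 *     features  verbose  preempt  debug
 *     latency_ns  min_granularity_ns  wakeup_granularity_ns
 *     tunable_scaling  migration_cost_ns  nr_migrate
 *     numa_balancing/scan_*  domains/cpuN/domainM/...
 */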

#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;
static struct dentry		*sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_remove(debugfs_lookup(buf, sd_dentry));
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
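
/*
 * Usage pattern (illustrative): topology changes call
 * dirty_sched_domain_sysctl() to mark a CPU's "domains/cpuN" subtree
 * stale; the next update_sched_domain_debugfs() pass, run under
 * sched_domains_mutex, recreates only the marked subtrees and clears
 * their bits in sd_sysctl_cpus.
 */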

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
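
/*
 * Worked example (illustrative) with CPUs 0 and 2 online: *offset == 0
 * returns the header token (1); *offset == 1 maps n = 0 to
 * cpumask_first() == 0 and returns CPU 0 as (0 + 2); *offset == 2 maps
 * n = 1 to cpumask_next(0) == 2 and returns (2 + 2), setting *offset
 * to 3. Once cpumask_next() runs past nr_cpu_ids the iterator returns
 * NULL and the sequence ends.
 */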

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
			  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
		   get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
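
/*
 * Illustrative usage: this pair backs /proc/<pid>/sched. Reading the
 * file invokes proc_sched_show_task(); writing anything to it invokes
 * proc_sched_set_task(), clearing the task's accumulated schedstats:
 *
 *   $ cat /proc/self/sched
 *   cat (1234, #threads: 1)
 *   -------------------------------------------------------------------
 *   se.exec_start                                :        123456.789012
 *   ...
 *   # echo 0 > /proc/1234/sched
 */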