// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

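/*
 * Show the state of every scheduler feature flag: enabled features are
 * printed by name, disabled ones with a "NO_" prefix.
 */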
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

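/*
 * Enable or disable a single feature by name; a leading "NO_" clears the
 * feature. Returns 0 on success or a negative error if the name is not a
 * known feature.
 */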
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

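/*
 * Write handler for the "features" debugfs file (typically
 * /sys/kernel/debug/sched/features).  For example, assuming a feature
 * named GENTLE_FAIR_SLEEPERS is defined in features.h:
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 *
 * would clear that feature flag.
 */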
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SMP

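/*
 * "tunable_scaling" selects how the scheduler's latency tunables scale with
 * the number of online CPUs: none, logarithmic or linear (the
 * SCHED_TUNABLESCALING_* values).
 */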
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

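/*
 * "preempt" selects the dynamic preemption model at runtime; valid values
 * are the strings listed in preempt_modes[] below ("none", "voluntary",
 * "full").
 */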
static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

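/*
 * Create the "sched" debugfs directory (typically mounted at
 * /sys/kernel/debug/sched/) and populate it with the scheduler's tunables
 * and debug files.
 */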
static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;
static struct dentry		*sd_dentry;

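/*
 * Print the names of the flags set in a sched_domain's ->flags word; this
 * backs the per-domain "flags" debugfs file.
 */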
static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

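/*
 * Expose the interesting fields of one sched_domain as debugfs files under
 * the given parent directory.
 */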
static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

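/*
 * (Re)build the debugfs "domains" hierarchy for every CPU whose sched
 * domains have changed since the last update, as tracked in sd_sysctl_cpus.
 */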
void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_remove(debugfs_lookup(buf, sd_dentry));
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
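/*
 * Print the scheduling-entity statistics of one CPU's slice of a task group.
 */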
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller at a time can use the
 * full-length group_path[] buffer for the cgroup path; other simultaneous
 * callers fall back to a shorter stack buffer. A "..." suffix is appended
 * to the stack buffer so that a possibly truncated path is visible in the
 * output.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

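/*
 * Print one row of the task table for @p: state, comm, pid, vruntime,
 * context switches, priority, schedstat times and (optionally) NUMA node
 * and cgroup path.
 */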
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

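/*
 * Print one print_task() row for every thread currently placed on @rq_cpu
 * (the table header labels these "runnable tasks").
 */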
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

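/*
 * Print the state of one CPU's cfs_rq: vruntime spread, nr_running counts,
 * load/PELT averages and (if enabled) bandwidth throttling state.
 */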
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

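/*
 * Print all per-CPU state for @cpu: rq counters, schedstats and the
 * cfs/rt/dl runqueues, followed by the task table.
 */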
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_idle_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

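/*
 * Print the per-task scheduler state shown in /proc/<pid>/sched.
 */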
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

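/*
 * Warn (rate-limited to at most once per hour) when need_resched has been
 * set for longer than the configured latency_warn_ms threshold without a
 * schedule happening.
 */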
void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}