/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

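/*
 * Two pieces of state gate the probes below: sched_ref counts users of
 * the scheduler tracepoint probes (cmdline recording alone is enough to
 * take a reference), while tracer_enabled counts tracers that actually
 * want switch/wakeup events written into ctx_trace.
 */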
static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static int			sched_ref;
static DEFINE_MUTEX(sched_register_mutex);

static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (!sched_ref)
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

	local_irq_restore(flags);
}

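/*
 * One probe handles both the sched_wakeup and sched_wakeup_new
 * tracepoints; see tracing_sched_register() below.
 */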
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!tracer_enabled))
		return;

	pc = preempt_count();
	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}

void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
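
/*
 * Illustrative usage (not part of this file): a tracer that wants PID
 * to comm resolution in its output would typically bracket its
 * lifetime with the two helpers above, e.g.:
 *
 *	my_tracer_init():  tracing_start_cmdline_record();
 *	my_tracer_reset(): tracing_stop_cmdline_record();
 *
 * my_tracer_*() are hypothetical callbacks, named here only for the
 * example.
 */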

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
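
/*
 * Illustrative only (a hypothetical tracer, not part of this file):
 * a tracer that wants context switch events recorded into its own
 * trace array would typically do, from its init callback:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		tracing_sched_switch_assign_trace(tr);
 *		tracing_start_sched_switch_record();
 *		return 0;
 *	}
 *
 * and undo it in its reset callback via
 * tracing_stop_sched_switch_record().
 */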

static void start_sched_trace(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}

static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	start_sched_trace(tr);
	return 0;
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch();
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
	tracing_stop_sched_switch();
}

static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
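
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * the tracing directory is set up there by the core tracing code):
 *
 *	echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 *
 * selects this tracer and dumps the recorded context switch and
 * wakeup events.
 */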