/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

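/*
 * Module state:
 *   ctx_trace      - trace array that switch/wakeup entries are written to
 *   tracer_enabled - number of tracers that want entries recorded
 *   sched_ref      - number of users of the scheduler tracepoint probes
 *   sched_stopped  - while set, recording is temporarily paused
 * Both counters are protected by sched_register_mutex.
 */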
static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static int			sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int			sched_stopped;

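/*
 * Write a context switch entry (TRACE_CTX) for prev -> next into the
 * trace array's ring buffer.
 */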
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}

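/*
 * Probe attached to the sched_switch tracepoint.  Records the cmdlines
 * of both tasks and, when switch tracing is enabled, logs the context
 * switch on the current CPU.
 */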
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}

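/*
 * Write a wakeup entry (TRACE_WAKE) into the trace array's ring buffer.
 * @curr is the task doing the waking, @wakee the task being woken.
 */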
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}

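/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints.
 * Records the waker's cmdline and, when tracing is enabled, logs the
 * wakeup on the current CPU.
 */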
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

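/*
 * Attach the probes to the scheduler tracepoints.  On failure, any
 * probe that was already registered is removed again.
 */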
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}

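/*
 * tracing_start/stop_sched_switch() reference count the tracepoint
 * probes so that cmdline recording and switch tracing can share them.
 */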
static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}

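/**
 * tracing_start_cmdline_record - start recording pid/comm mappings
 *
 * Hooks the scheduler tracepoints so that the comm of each task that
 * is scheduled or woken gets recorded for later pid-to-comm lookups.
 */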
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

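/**
 * tracing_stop_cmdline_record - stop recording pid/comm mappings
 *
 * Drops the reference taken by tracing_start_cmdline_record(); the
 * tracepoint probes are unregistered when the last user goes away.
 */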
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}