blob: 5fca0f51fde4ac27df4c733c6ebd827c34d661b2 [file] [log] [blame]
Steven Rostedt35e8e302008-05-12 21:20:42 +02001/*
2 * trace context switch
3 *
4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5 *
6 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020012#include <linux/ftrace.h>
Steven Rostedtad8d75f2009-04-14 19:39:12 -040013#include <trace/events/sched.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020014
15#include "trace.h"
16
/* Trace array that receives the context-switch/wakeup events. */
static struct trace_array *ctx_trace;
/* Count of tracers that currently want events recorded. */
static int __read_mostly tracer_enabled;
/* Reference count of users of the sched tracepoint probes. */
static int sched_ref;
/* Serializes probe (un)registration and tracer_enabled updates. */
static DEFINE_MUTEX(sched_register_mutex);
/* Set while the sched_switch tracer is paused via its ->stop() hook. */
static int sched_stopped;
Steven Rostedt35e8e302008-05-12 21:20:42 +020022
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020023
24void
25tracing_sched_switch_trace(struct trace_array *tr,
26 struct task_struct *prev,
27 struct task_struct *next,
28 unsigned long flags, int pc)
29{
30 struct ftrace_event_call *call = &event_context_switch;
Steven Rostedte77405a2009-09-02 14:17:06 -040031 struct ring_buffer *buffer = tr->buffer;
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020032 struct ring_buffer_event *event;
33 struct ctx_switch_entry *entry;
34
Steven Rostedte77405a2009-09-02 14:17:06 -040035 event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020036 sizeof(*entry), flags, pc);
37 if (!event)
38 return;
39 entry = ring_buffer_event_data(event);
40 entry->prev_pid = prev->pid;
41 entry->prev_prio = prev->prio;
42 entry->prev_state = prev->state;
43 entry->next_pid = next->pid;
44 entry->next_prio = next->prio;
45 entry->next_state = next->state;
46 entry->next_cpu = task_cpu(next);
47
Steven Rostedte77405a2009-09-02 14:17:06 -040048 if (!filter_check_discard(call, entry, buffer, event))
49 trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020050}
51
/*
 * Tracepoint probe attached to sched_switch.  Always refreshes the
 * pid->comm mappings of both tasks; records a context-switch event only
 * when a tracer has enabled recording and it is not paused.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
		   struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* No user of the probes at all - nothing to do. */
	if (unlikely(!sched_ref))
		return;

	/* Record cmdlines even when event recording is disabled. */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* Skip recording while this CPU's per-cpu buffer is disabled. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
80
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020081void
82tracing_sched_wakeup_trace(struct trace_array *tr,
83 struct task_struct *wakee,
84 struct task_struct *curr,
85 unsigned long flags, int pc)
86{
87 struct ftrace_event_call *call = &event_wakeup;
88 struct ring_buffer_event *event;
89 struct ctx_switch_entry *entry;
Steven Rostedte77405a2009-09-02 14:17:06 -040090 struct ring_buffer *buffer = tr->buffer;
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020091
Steven Rostedte77405a2009-09-02 14:17:06 -040092 event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020093 sizeof(*entry), flags, pc);
94 if (!event)
95 return;
96 entry = ring_buffer_event_data(event);
97 entry->prev_pid = curr->pid;
98 entry->prev_prio = curr->prio;
99 entry->prev_state = curr->state;
100 entry->next_pid = wakee->pid;
101 entry->next_prio = wakee->prio;
102 entry->next_state = wakee->state;
103 entry->next_cpu = task_cpu(wakee);
104
Steven Rostedte77405a2009-09-02 14:17:06 -0400105 if (!filter_check_discard(call, entry, buffer, event))
106 ring_buffer_unlock_commit(buffer, event);
107 ftrace_trace_stack(tr->buffer, flags, 6, pc);
108 ftrace_trace_userstack(tr->buffer, flags, pc);
Frederic Weisbecker82e04af2009-07-29 18:00:29 +0200109}
110
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200111static void
Peter Zijlstra468a15b2008-12-16 08:07:03 +0100112probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200113{
Ingo Molnar57422792008-05-12 21:20:51 +0200114 struct trace_array_cpu *data;
115 unsigned long flags;
Steven Rostedt38697052008-10-01 13:14:09 -0400116 int cpu, pc;
Ingo Molnar57422792008-05-12 21:20:51 +0200117
Zhaoleidcef7882009-03-31 15:26:14 +0800118 if (unlikely(!sched_ref))
119 return;
120
121 tracing_record_cmdline(current);
122
123 if (!tracer_enabled || sched_stopped)
Ingo Molnar57422792008-05-12 21:20:51 +0200124 return;
125
Steven Rostedt38697052008-10-01 13:14:09 -0400126 pc = preempt_count();
Ingo Molnar57422792008-05-12 21:20:51 +0200127 local_irq_save(flags);
128 cpu = raw_smp_processor_id();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -0400129 data = ctx_trace->data[cpu];
Ingo Molnar57422792008-05-12 21:20:51 +0200130
Steven Rostedt3ea2e6d2008-10-04 02:01:00 -0400131 if (likely(!atomic_read(&data->disabled)))
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500132 tracing_sched_wakeup_trace(ctx_trace, wakee, current,
Steven Rostedt38697052008-10-01 13:14:09 -0400133 flags, pc);
Ingo Molnar57422792008-05-12 21:20:51 +0200134
Ingo Molnar57422792008-05-12 21:20:51 +0200135 local_irq_restore(flags);
136}
137
/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints.  On failure, probes that were already
 * registered are unwound in reverse order.  Returns 0 on success or the
 * negative error from the failing registration.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	/* The same probe handles both wakeup and wakeup_new events. */
	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}
170
/* Detach all sched probes, in reverse order of registration. */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
177
Ingo Molnarf2252932008-05-22 10:37:48 +0200178static void tracing_start_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200179{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100180 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500181 if (!(sched_ref++))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200182 tracing_sched_register();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100183 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200184}
185
Ingo Molnarf2252932008-05-22 10:37:48 +0200186static void tracing_stop_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200187{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100188 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500189 if (!(--sched_ref))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200190 tracing_sched_unregister();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100191 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200192}
193
/*
 * tracing_start_cmdline_record - keep pid->comm mappings up to date
 *
 * Enables the sched probes solely for their tracing_record_cmdline()
 * side effect; no events are recorded unless tracer_enabled is also set.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
198
/* tracing_stop_cmdline_record - stop updating pid->comm mappings */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
203
/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	/* A tracer must assign a trace array (ctx_trace) first. */
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	/* tracer_enabled is modified only under sched_register_mutex. */
	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}
222
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	/* More stops than starts indicates unbalanced callers. */
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}
237
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 *
 * Must be called before tracing_start_sched_switch_record(), which
 * warns and bails out when no trace array has been assigned.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
250
/* Tracer-side helper: stop recording context switches. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
255
/* ->init hook of the sched_switch tracer: reset buffers, start recording. */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
263
/* ->reset hook: stop recording, but only if the probes were started. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
269
/* ->start hook: resume event recording after a ->stop(). */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}
274
/* ->stop hook: pause event recording without dropping the probes. */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}
279
/* The "sched_switch" tracer: records context switches and wakeups. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
292
/* Register the sched_switch tracer at boot time. */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
Ingo Molnarc71dd422008-12-19 01:09:51 +0100298