/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

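/*
 * One scratch buffer per software event recursion context (task,
 * softirq, hardirq, NMI), indexed by the value handed out by
 * perf_swevent_get_recursion_context().
 */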
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

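/*
 * Bind a perf event to its trace event. The first perf event attached
 * to a given trace event allocates the per-cpu hlist of active events;
 * the first one system-wide also allocates the per-context scratch
 * buffers. The tracepoint probe is then hooked up, either through the
 * event class ->reg() callback or directly via
 * tracepoint_probe_register().
 */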
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	if (tp_event->class->reg)
		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	else
		ret = tracepoint_probe_register(tp_event->name,
						tp_event->class->perf_probe,
						tp_event);

	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

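/*
 * Entry point from the perf syscall path: find the trace event whose id
 * matches attr.config, under event_mutex, and initialize its perf side.
 * Takes a module reference so the event cannot go away while in use.
 */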
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->perf_probe &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

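/*
 * Add the event to this CPU's hlist of active perf events for its
 * trace event; the probe walks that list under RCU to find consumers.
 */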
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = per_cpu_ptr(list, smp_processor_id());
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

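/* Remove the event from the per-cpu list of active perf events. */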
void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

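/*
 * Tear down the perf side of a trace event: drop the reference taken in
 * perf_trace_event_init() and, on the last user, unregister the probe
 * and free the per-cpu lists; the last user system-wide also frees the
 * scratch buffers.
 */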
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		return;

	if (tp_event->class->reg)
		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
	else
		tracepoint_probe_unregister(tp_event->name,
					    tp_event->class->perf_probe,
					    tp_event);

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
}

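/*
 * Grab the scratch buffer for the current recursion context and CPU and
 * fill in the common trace entry header. Returns the buffer and stores
 * the recursion context in *rctxp, or NULL if that context is already
 * in use. A probe typically pairs this with perf_trace_buf_submit();
 * a rough sketch (argument names are illustrative only):
 *
 *	entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in the event-specific fields of *entry ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 */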
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());

	/* zero the dead bytes from alignment to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, regs->flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);