/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

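/*
 * Dump the currently recorded maximum stack trace to the console.
 * Called when the end of the task stack is found to be corrupted,
 * just before BUG()ing out.
 */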
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 max_stack_trace.nr_entries);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

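/*
 * Compare the current stack usage (from the passed in stack pointer up
 * to the top of the thread stack) against the recorded maximum. On a
 * new maximum, save a fresh stack trace and map each saved return
 * address to its depth on the stack, discounting the tracer's own frame.
 */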
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[x] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_dump_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	max_stack_trace.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

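/*
 * The ftrace callback, invoked at the start of every traced function.
 * The per-cpu trace_active counter keeps the callback from recursing
 * into itself, since check_stack() calls functions that are themselves
 * traced.
 */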
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

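/* Read/write handlers for the stack_max_size file in the tracing directory */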
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

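/*
 * seq_file iterator for the stack_trace file: walks the entries of the
 * saved maximum stack trace while holding max_stack_lock.
 */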
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

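/*
 * Handle writes to /proc/sys/kernel/stack_tracer_enabled by registering
 * or unregistering the ftrace callback when the value changes.
 */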
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

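/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer early, and "stacktrace_filter=<funcs>" limits it to the listed
 * functions (the __setup() below matches the "stacktrace" prefix, so the
 * "_filter=" suffix arrives here in str).
 */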
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

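/*
 * Create the stack_max_size, stack_trace and stack_trace_filter files in
 * the tracing directory, apply any boot-time filter, and register the
 * callback if the tracer was enabled on the command line.
 */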
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);