/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

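/*
 * Record one return address in the trace, honouring the skip count and,
 * when @nosched is set, filtering out scheduler internals.  Returns -1
 * once the entries buffer is full.
 */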
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

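/*
 * Core unwinder loop: walk the frames of @task (starting from @regs when
 * non-NULL) and record each return address, terminating the trace with
 * ULONG_MAX if there is room.
 */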
static void noinline __save_stack_trace(struct stack_trace *trace,
			struct task_struct *task, struct pt_regs *regs,
			bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	trace->skip++;
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
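
/*
 * Example usage (an illustrative sketch, not part of this file): callers
 * supply their own entries buffer via struct stack_trace.  The function
 * name below is hypothetical.
 *
 *	static void example_show_current_stack(void)
 *	{
 *		unsigned long entries[8];
 *		struct stack_trace trace = {
 *			.entries	= entries,
 *			.max_entries	= ARRAY_SIZE(entries),
 *			.skip		= 0,
 *		};
 *
 *		save_stack_trace(&trace);
 *		print_stack_trace(&trace, 0);
 *	}
 */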

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

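/*
 * Like save_stack_trace(), but for another task.  Scheduler functions are
 * filtered out (nosched) so the trace starts where @tsk went to sleep.
 */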
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		trace->skip++;
	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

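/*
 * Warn and dump the offending stack, but only once per call site, so that
 * unreliable unwinds can be debugged without flooding the log.
 */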
#define STACKTRACE_DUMP_ONCE(task) ({				\
	static bool __section(.data.unlikely) __dumped;		\
								\
	if (!__dumped) {					\
		__dumped = true;				\
		WARN_ON(1);					\
		show_stack(task, NULL);				\
	}							\
})

static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state);
		if (regs) {
			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (!user_mode(regs))
				return -EINVAL;

			/*
			 * The last frame contains the user mode syscall
			 * pt_regs.  Skip it and finish the unwind.
			 */
			unwind_next_frame(&state);
			if (!unwind_done(&state)) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}
			break;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's
		 * some generated code which __kernel_text_address() doesn't
		 * know about.
		 */
		if (!addr) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of
 * the stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is
 * inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	if (!try_get_task_stack(tsk))
		return -EINVAL;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
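
/*
 * Example usage (an illustrative sketch, assuming a caller such as livepatch
 * that must know whether a trace can be trusted): a non-zero return value
 * means the trace must not be relied upon.  The function name below is
 * hypothetical.
 *
 *	static int example_task_has_reliable_stack(struct task_struct *task)
 *	{
 *		unsigned long entries[64];
 *		struct stack_trace trace = {
 *			.entries	= entries,
 *			.max_entries	= ARRAY_SIZE(entries),
 *		};
 *
 *		return save_stack_trace_tsk_reliable(task, &trace);
 *	}
 */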
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

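/*
 * Layout of a user-space stack frame as built by compilers that keep frame
 * pointers: the saved frame pointer followed by the return address.
 */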
struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

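/*
 * Copy one stack frame from user space with page faults disabled.  Returns
 * 1 on success, 0 if the frame could not be read.
 */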
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

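/*
 * Walk the user stack by following the frame-pointer chain, recording the
 * return address found in each frame.
 */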
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
239}