#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/bitops.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

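/*
 * A standard stack frame header is two words: the caller's saved frame
 * pointer followed by the return address.
 */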
#define FRAME_HEADER_SIZE (sizeof(long) * 2)

/*
 * This disables KASAN checking when reading a value from another task's stack,
 * since the other task could be running on another CPU and could have poisoned
 * the stack in the meantime.
 */
#define READ_ONCE_TASK_STACK(task, x)                   \
({                                                      \
        unsigned long val;                              \
        if (task == current)                            \
                val = READ_ONCE(x);                     \
        else                                            \
                val = READ_ONCE_NOCHECK(x);             \
        val;                                            \
})

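/*
 * Dump the remaining stack contents (at most once per boot) to help debug a
 * bad frame pointer or bad regs address detected by unwind_next_frame().
 */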
static void unwind_dump(struct unwind_state *state)
{
        static bool dumped_before = false;
        bool prev_zero, zero = false;
        unsigned long word, *sp;
        struct stack_info stack_info = {0};
        unsigned long visit_mask = 0;

        if (dumped_before)
                return;

        dumped_before = true;

        printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
                        state->stack_info.type, state->stack_info.next_sp,
                        state->stack_mask, state->graph_idx);

        for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
                        break;

                for (; sp < stack_info.end; sp++) {

                        word = READ_ONCE_NOCHECK(*sp);

                        prev_zero = zero;
                        zero = word == 0;

                        if (zero) {
                                if (!prev_zero)
                                        printk_deferred("%p: %0*x ...\n",
                                                        sp, BITS_PER_LONG/4, 0);
                                continue;
                        }

                        printk_deferred("%p: %0*lx (%pB)\n",
                                        sp, BITS_PER_LONG/4, word, (void *)word);
                }
        }
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
        if (unwind_done(state))
                return 0;

        return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

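/* Size of the pt_regs frame as saved on the kernel stack: */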
static size_t regs_size(struct pt_regs *regs)
{
        /* x86_32 regs from kernel mode are two words shorter: */
        if (IS_ENABLED(CONFIG_X86_32) && !user_mode(regs))
                return sizeof(*regs) - 2*sizeof(long);

        return sizeof(*regs);
}

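/*
 * Return true if the given instruction pointer is in the entry text section
 * (or the irqentry text section, which only exists with
 * CONFIG_FUNCTION_GRAPH_TRACER or CONFIG_KASAN), where frame pointers may not
 * have been set up yet.
 */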
static bool in_entry_code(unsigned long ip)
{
        char *addr = (char *)ip;

        if (addr >= __entry_text_start && addr < __entry_text_end)
                return true;

#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
        if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
                return true;
#endif

        return false;
}

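/*
 * Address of the last possible stack frame header: the two words (saved bp
 * and return address) sitting just below the task's pt_regs.
 */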
static inline unsigned long *last_frame(struct unwind_state *state)
{
        return (unsigned long *)task_pt_regs(state->task) - 2;
}

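/*
 * When gcc realigns the stack pointer in a function prologue (see the
 * examples in is_last_task_frame() below), the resulting frame header ends up
 * this many words lower than the normal last frame.
 */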
#ifdef CONFIG_X86_32
#define GCC_REALIGN_WORDS 3
#else
#define GCC_REALIGN_WORDS 1
#endif

static inline unsigned long *last_aligned_frame(struct unwind_state *state)
{
        return last_frame(state) - GCC_REALIGN_WORDS;
}

static bool is_last_task_frame(struct unwind_state *state)
{
        unsigned long *last_bp = last_frame(state);
        unsigned long *aligned_bp = last_aligned_frame(state);

        /*
         * We have to check for the last task frame at two different locations
         * because gcc can occasionally decide to realign the stack pointer and
         * change the offset of the stack frame in the prologue of a function
         * called by head/entry code.  Examples:
         *
         * <start_secondary>:
         *      push   %edi
         *      lea    0x8(%esp),%edi
         *      and    $0xfffffff8,%esp
         *      pushl  -0x4(%edi)
         *      push   %ebp
         *      mov    %esp,%ebp
         *
         * <x86_64_start_kernel>:
         *      lea    0x8(%rsp),%r10
         *      and    $0xfffffffffffffff0,%rsp
         *      pushq  -0x8(%r10)
         *      push   %rbp
         *      mov    %rsp,%rbp
         *
         * Note that after aligning the stack, it pushes a duplicate copy of
         * the return address before pushing the frame pointer.
         */
        return (state->bp == last_bp ||
                (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
}

/*
 * This determines if the frame pointer actually contains an encoded pointer to
 * pt_regs on the stack.  See ENCODE_FRAME_POINTER.
 */
static struct pt_regs *decode_frame_pointer(unsigned long *bp)
{
        unsigned long regs = (unsigned long)bp;

        if (!(regs & 0x1))
                return NULL;

        return (struct pt_regs *)(regs & ~0x1);
}

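/*
 * Verify that the next frame (a regular frame header or an encoded pt_regs
 * pointer) lies on a valid stack and doesn't unwind downward or overlap the
 * previous frame, then advance the state to it and record its return address.
 */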
static bool update_stack_state(struct unwind_state *state,
                               unsigned long *next_bp)
{
        struct stack_info *info = &state->stack_info;
        enum stack_type prev_type = info->type;
        struct pt_regs *regs;
        unsigned long *frame, *prev_frame_end, *addr_p, addr;
        size_t len;

        if (state->regs)
                prev_frame_end = (void *)state->regs + regs_size(state->regs);
        else
                prev_frame_end = (void *)state->bp + FRAME_HEADER_SIZE;

        /* Is the next frame pointer an encoded pointer to pt_regs? */
        regs = decode_frame_pointer(next_bp);
        if (regs) {
                frame = (unsigned long *)regs;
                len = regs_size(regs);
                state->got_irq = true;
        } else {
                frame = next_bp;
                len = FRAME_HEADER_SIZE;
        }

        /*
         * If the next bp isn't on the current stack, switch to the next one.
         *
         * We may have to traverse multiple stacks to deal with the possibility
         * that info->next_sp could point to an empty stack and the next bp
         * could be on a subsequent stack.
         */
        while (!on_stack(info, frame, len))
                if (get_stack_info(info->next_sp, state->task, info,
                                   &state->stack_mask))
                        return false;

        /* Make sure it only unwinds up and doesn't overlap the prev frame: */
        if (state->orig_sp && state->stack_info.type == prev_type &&
            frame < prev_frame_end)
                return false;

        /* Move state to the next frame: */
        if (regs) {
                state->regs = regs;
                state->bp = NULL;
        } else {
                state->bp = next_bp;
                state->regs = NULL;
        }

        /* Save the return address: */
        if (state->regs && user_mode(state->regs))
                state->ip = 0;
        else {
                addr_p = unwind_get_return_address_ptr(state);
                addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
                state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
                                                  addr, addr_p);
        }

        /* Save the original stack pointer for unwind_dump(): */
        if (!state->orig_sp)
                state->orig_sp = frame;

        return true;
}

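/*
 * Advance the unwind state to the next frame.  Returns false when the end of
 * the stack is reached or a bad frame pointer/regs address is encountered.
 */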
bool unwind_next_frame(struct unwind_state *state)
{
        struct pt_regs *regs;
        unsigned long *next_bp;

        if (unwind_done(state))
                return false;

        /* Have we reached the end? */
        if (state->regs && user_mode(state->regs))
                goto the_end;

        if (is_last_task_frame(state)) {
                regs = task_pt_regs(state->task);

                /*
                 * kthreads (other than the boot CPU's idle thread) have some
                 * partial regs at the end of their stack which were placed
                 * there by copy_thread_tls().  But the regs don't have any
                 * useful information, so we can skip them.
                 *
                 * This user_mode() check is slightly broader than a PF_KTHREAD
                 * check because it also catches the awkward situation where a
                 * newly forked kthread transitions into a user task by calling
                 * do_execve(), which eventually clears PF_KTHREAD.
                 */
                if (!user_mode(regs))
                        goto the_end;

                /*
                 * We're almost at the end, but not quite: there's still the
                 * syscall regs frame.  Entry code doesn't encode the regs
                 * pointer for syscalls, so we have to set it manually.
                 */
                state->regs = regs;
                state->bp = NULL;
                state->ip = 0;
                return true;
        }

        /* Get the next frame pointer: */
        if (state->regs)
                next_bp = (unsigned long *)state->regs->bp;
        else
                next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);

        /* Move to the next frame if it's safe: */
        if (!update_stack_state(state, next_bp))
                goto bad_address;

        return true;

bad_address:
        /*
         * When unwinding a non-current task, the task might actually be
         * running on another CPU, in which case it could be modifying its
         * stack while we're reading it.  This is generally not a problem and
         * can be ignored as long as the caller understands that unwinding
         * another task will not always succeed.
         */
        if (state->task != current)
                goto the_end;

        /*
         * Don't warn if the unwinder got lost due to an interrupt in entry
         * code or in the C handler before the first frame pointer got set up:
         */
        if (state->got_irq && in_entry_code(state->ip))
                goto the_end;
        if (state->regs &&
            state->regs->sp >= (unsigned long)last_aligned_frame(state) &&
            state->regs->sp < (unsigned long)task_pt_regs(state->task))
                goto the_end;

        if (state->regs) {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
                        state->regs, state->task->comm,
                        state->task->pid, next_bp);
                unwind_dump(state);
        } else {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
                        state->bp, state->task->comm,
                        state->task->pid, next_bp);
                unwind_dump(state);
        }
the_end:
        state->stack_info.type = STACK_TYPE_UNKNOWN;
        return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

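/*
 * Begin unwinding from the given regs or the task's current frame pointer,
 * then skip ahead to the caller-specified first frame.
 */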
void __unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs, unsigned long *first_frame)
{
        unsigned long *bp;

        memset(state, 0, sizeof(*state));
        state->task = task;
        state->got_irq = (regs);

        /* Don't even attempt to start from user mode regs: */
        if (regs && user_mode(regs)) {
                state->stack_info.type = STACK_TYPE_UNKNOWN;
                return;
        }

        bp = get_frame_pointer(task, regs);

        /* Initialize stack info and make sure the frame data is accessible: */
        get_stack_info(bp, state->task, &state->stack_info,
                       &state->stack_mask);
        update_stack_state(state, bp);

        /*
         * The caller can provide the address of the first frame directly
         * (first_frame) or indirectly (regs->sp) to indicate which stack frame
         * to start unwinding at.  Skip ahead until we reach it.
         */
        while (!unwind_done(state) &&
               (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
                state->bp < first_frame))
                unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(__unwind_start);