// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#include <asm/stacktrace.h>

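/*
 * Bind sp_in_global to the stack-pointer register so C code can read the
 * current stack pointer directly.
 */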
register unsigned long sp_in_global __asm__("sp");

#ifdef CONFIG_FRAME_POINTER

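/*
 * Frame-pointer unwinder: follow the saved (fp, ra) pairs that the function
 * prologue stores just below each frame pointer.  fn() is called for every
 * return address found; returning true from fn() stops the walk.
 */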
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		/* Start from the context described by regs */
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		/* Start from walk_stackframe()'s own frame */
		const register unsigned long current_sp = sp_in_global;
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

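	/*
	 * Report the current pc, validate that fp still points within this
	 * task's stack, then load the previous frame's fp/ra pair, which the
	 * prologue saved immediately below the frame pointer.
	 */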
	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(arg, pc)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		fp = frame->fp;
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
					   (unsigned long *)(fp - 8));
	}
}

#else /* !CONFIG_FRAME_POINTER */

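/*
 * Without frame pointers there is no reliable frame chain: scan the kernel
 * stack word by word and report every value that looks like a return address
 * into kernel text.  The result is a best-effort guess at the call trace.
 */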
void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		sp = sp_in_global;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(fn(arg, pc)))
			break;
		pc = (*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */

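/* walk_stackframe() callback: print one entry and keep walking. */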
static bool print_trace_address(void *arg, unsigned long pc)
{
	const char *loglvl = arg;

	print_ip_sym(loglvl, pc);
	return false;
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}

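/*
 * walk_stackframe() callback for get_wchan(): record the first address that
 * is not part of the scheduler and stop the walk.
 */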
static bool save_wchan(void *arg, unsigned long pc)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return true;
	}
	return false;
}

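/*
 * Return the address where the given task is blocked (its "wait channel"),
 * or 0 if the task is running or is the current task.
 */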
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (likely(task && task != current && task->state != TASK_RUNNING))
		walk_stackframe(task, NULL, save_wchan, &pc);
	return pc;
}

#ifdef CONFIG_STACKTRACE

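/*
 * Store one entry into a struct stack_trace buffer: honour the requested
 * number of skipped entries, optionally drop scheduler functions, and stop
 * the walk once the buffer is full.
 */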
static bool __save_trace(unsigned long pc, void *arg, bool nosched)
{
	struct stack_trace *trace = arg;

	if (unlikely(nosched && in_sched_functions(pc)))
		return false;
	if (unlikely(trace->skip > 0)) {
		trace->skip--;
		return false;
	}

	trace->entries[trace->nr_entries++] = pc;
	return (trace->nr_entries >= trace->max_entries);
}

static bool save_trace(void *arg, unsigned long pc)
{
	return __save_trace(pc, arg, false);
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	walk_stackframe(tsk, NULL, save_trace, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif /* CONFIG_STACKTRACE */