/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#include <linux/llist.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

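/*
 * The classes of stack an arm64 thread of execution may use: the task
 * stack, the per-cpu IRQ stack, the per-cpu overflow stack (when
 * CONFIG_VMAP_STACK is enabled), and the SDEI stacks. __NR_STACK_TYPES
 * sizes the stacks_done bitmap in struct stackframe below.
 */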
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

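/*
 * Describes one contiguous stack: its low/high bounds and its type.
 * Filled in by the on_*_stack() helpers below when a queried address
 * range lies within the stack.
 */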
struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};
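
/*
 * Illustrative sketch (not a definition from this header; frame, info and
 * fp are caller-provided names): an unwinder consuming struct stackframe
 * would typically enforce the invariants the accounting fields encode,
 * along these lines:
 *
 *	// Within one stack, frame records must be at increasing addresses.
 *	if (info->type == frame->prev_type && fp <= frame->prev_fp)
 *		return -EINVAL;
 *	// Never unwind back onto a stack that was already fully unwound.
 *	if (test_bit(info->type, frame->stacks_done))
 *		return -EINVAL;
 *
 * The real unwinder lives in arch/arm64/kernel/stacktrace.c.
 */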

extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);

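/* Per-cpu base (lowest address) of the IRQ stack, read by on_irq_stack(). */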
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	if (!low)
		return false;

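	/*
	 * The object at [sp, sp + size) must lie entirely within the stack;
	 * the sp + size < sp test rejects ranges that wrap around the top of
	 * the address space.
	 */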
	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

static inline bool on_irq_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}
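
/*
 * Illustrative example (a sketch, not part of this API): a caller can use
 * on_task_stack() to check that a saved pt_regs actually lies on a task's
 * stack before trusting its contents, e.g.:
 *
 *	if (on_task_stack(tsk, (unsigned long)regs, sizeof(*regs), NULL))
 *		dump_backtrace(regs, tsk, KERN_DEFAULT);
 */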

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, size, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}
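
/*
 * Minimal usage sketch (fp is a caller-provided candidate frame pointer):
 * an AArch64 frame record is a pair of adjacent unsigned longs ({fp, lr},
 * 16 bytes), so an unwinder validates it before dereferencing:
 *
 *	struct stack_info info;
 *
 *	if (!on_accessible_stack(tsk, fp, 16, &info))
 *		return -EINVAL;
 *
 * Note the ordering above: the task stack can always be checked, but the
 * per-cpu IRQ/overflow/SDEI stacks are only consulted when tsk is current
 * and preemption is disabled.
 */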

#endif /* __ASM_STACKTRACE_H */