blob: 1dfac634bbf7f2a5470d0c12d2074a9cf71d4fc4 [file] [log] [blame]
/*
 * Copyright (C) 2014 Steven Rostedt, Red Hat Inc
 */
4
5#include <linux/linkage.h>
6#include <asm/ptrace.h>
7#include <asm/ftrace.h>
Al Viro784d5692016-01-11 11:04:34 -05008#include <asm/export.h>
Steven Rostedte18eead32014-05-08 15:21:52 -04009
10
11 .code64
12 .section .entry.text, "ax"
13
Steven Rostedte18eead32014-05-08 15:21:52 -040014#ifdef CC_USING_FENTRY
15# define function_hook __fentry__
Steven Rostedt5de0a8c2016-10-24 15:01:48 -040016EXPORT_SYMBOL(__fentry__)
Steven Rostedte18eead32014-05-08 15:21:52 -040017#else
18# define function_hook mcount
Steven Rostedt5de0a8c2016-10-24 15:01:48 -040019EXPORT_SYMBOL(mcount)
Steven Rostedte18eead32014-05-08 15:21:52 -040020#endif
21
Steven Rostedt (Red Hat)0687c362014-11-24 18:08:48 -050022/* All cases save the original rbp (8 bytes) */
23#ifdef CONFIG_FRAME_POINTER
24# ifdef CC_USING_FENTRY
25/* Save parent and function stack frames (rip and rbp) */
26# define MCOUNT_FRAME_SIZE (8+16*2)
27# else
28/* Save just function stack frame (rip and rbp) */
29# define MCOUNT_FRAME_SIZE (8+16)
30# endif
31#else
32/* No need to save a stack frame */
33# define MCOUNT_FRAME_SIZE 8
34#endif /* CONFIG_FRAME_POINTER */
35
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -050036/* Size of stack used to save mcount regs in save_mcount_regs */
Steven Rostedt (Red Hat)0687c362014-11-24 18:08:48 -050037#define MCOUNT_REG_SIZE (SS+8 + MCOUNT_FRAME_SIZE)
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -050038
/*
 * gcc's -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to 'fentry' and not 'mcount'
 * and is done before the function's stack frame is set up.
 * Both require a set of regs to be saved before calling
 * any C code and restored before returning back to the function.
 *
 * On boot up, all these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, the size of the pt_regs structure will be
 * allocated on the stack and the required mcount registers will
 * be saved in the locations that pt_regs has them in.
 */
54
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -050055/*
56 * @added: the amount of stack added before calling this
57 *
58 * After this is called, the following registers contain:
59 *
60 * %rdi - holds the address that called the trampoline
61 * %rsi - holds the parent function (traced function's return address)
62 * %rdx - holds the original %rbp
63 */
Steven Rostedt (Red Hat)527aa752014-11-24 13:06:05 -050064.macro save_mcount_regs added=0
Steven Rostedt (Red Hat)0687c362014-11-24 18:08:48 -050065
66 /* Always save the original rbp */
67 pushq %rbp
68
69#ifdef CONFIG_FRAME_POINTER
70 /*
71 * Stack traces will stop at the ftrace trampoline if the frame pointer
72 * is not set up properly. If fentry is used, we need to save a frame
73 * pointer for the parent as well as the function traced, because the
74 * fentry is called before the stack frame is set up, where as mcount
75 * is called afterward.
76 */
77#ifdef CC_USING_FENTRY
78 /* Save the parent pointer (skip orig rbp and our return address) */
79 pushq \added+8*2(%rsp)
80 pushq %rbp
81 movq %rsp, %rbp
82 /* Save the return address (now skip orig rbp, rbp and parent) */
83 pushq \added+8*3(%rsp)
84#else
85 /* Can't assume that rip is before this (unless added was zero) */
86 pushq \added+8(%rsp)
87#endif
88 pushq %rbp
89 movq %rsp, %rbp
90#endif /* CONFIG_FRAME_POINTER */
91
92 /*
93 * We add enough stack to save all regs.
94 */
95 subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
Steven Rostedt (Red Hat)4bcdf152014-11-24 11:30:58 -050096 movq %rax, RAX(%rsp)
97 movq %rcx, RCX(%rsp)
98 movq %rdx, RDX(%rsp)
99 movq %rsi, RSI(%rsp)
100 movq %rdi, RDI(%rsp)
101 movq %r8, R8(%rsp)
102 movq %r9, R9(%rsp)
Steven Rostedt (Red Hat)0687c362014-11-24 18:08:48 -0500103 /*
104 * Save the original RBP. Even though the mcount ABI does not
105 * require this, it helps out callers.
106 */
107 movq MCOUNT_REG_SIZE-8(%rsp), %rdx
108 movq %rdx, RBP(%rsp)
109
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500110 /* Copy the parent address into %rsi (second parameter) */
111#ifdef CC_USING_FENTRY
112 movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
113#else
114 /* %rdx contains original %rbp */
115 movq 8(%rdx), %rsi
116#endif
117
Steven Rostedt (Red Hat)4bcdf152014-11-24 11:30:58 -0500118 /* Move RIP to its proper location */
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -0500119 movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
Steven Rostedt (Red Hat)094dfc52014-11-24 13:21:09 -0500120 movq %rdi, RIP(%rsp)
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500121
122 /*
123 * Now %rdi (the first parameter) has the return address of
124 * where ftrace_call returns. But the callbacks expect the
Steven Rostedt (Red Hat)6a06bdb2014-11-24 21:00:34 -0500125 * address of the call itself.
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500126 */
127 subq $MCOUNT_INSN_SIZE, %rdi
Steven Rostedt (Red Hat)4bcdf152014-11-24 11:30:58 -0500128 .endm
129
Steven Rostedt (Red Hat)527aa752014-11-24 13:06:05 -0500130.macro restore_mcount_regs
Steven Rostedt (Red Hat)4bcdf152014-11-24 11:30:58 -0500131 movq R9(%rsp), %r9
132 movq R8(%rsp), %r8
133 movq RDI(%rsp), %rdi
134 movq RSI(%rsp), %rsi
135 movq RDX(%rsp), %rdx
136 movq RCX(%rsp), %rcx
137 movq RAX(%rsp), %rax
Steven Rostedt (Red Hat)0687c362014-11-24 18:08:48 -0500138
139 /* ftrace_regs_caller can modify %rbp */
140 movq RBP(%rsp), %rbp
141
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -0500142 addq $MCOUNT_REG_SIZE, %rsp
Steven Rostedt (Red Hat)0687c362014-11-24 18:08:48 -0500143
Steven Rostedt (Red Hat)4bcdf152014-11-24 11:30:58 -0500144 .endm
145
Steven Rostedt (Red Hat)76c2f132014-11-24 14:54:27 -0500146#ifdef CONFIG_DYNAMIC_FTRACE
147
148ENTRY(function_hook)
149 retq
150END(function_hook)
151
Steven Rostedte18eead32014-05-08 15:21:52 -0400152ENTRY(ftrace_caller)
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500153 /* save_mcount_regs fills in first two parameters */
154 save_mcount_regs
155
156GLOBAL(ftrace_caller_op_ptr)
157 /* Load the ftrace_ops into the 3rd parameter */
158 movq function_trace_op(%rip), %rdx
159
Steven Rostedte18eead32014-05-08 15:21:52 -0400160 /* regs go into 4th parameter (but make it NULL) */
161 movq $0, %rcx
162
163GLOBAL(ftrace_call)
164 call ftrace_stub
165
Steven Rostedt (Red Hat)05df7102014-11-24 11:43:39 -0500166 restore_mcount_regs
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -0400167
168 /*
Borislav Petkovf1b92bb62016-02-16 09:43:21 +0100169 * The copied trampoline must call ftrace_epilogue as it
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -0400170 * still may need to call the function graph tracer.
Borislav Petkovf1b92bb62016-02-16 09:43:21 +0100171 *
172 * The code up to this label is copied into trampolines so
173 * think twice before adding any new code or changing the
174 * layout here.
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -0400175 */
Borislav Petkovf1b92bb62016-02-16 09:43:21 +0100176GLOBAL(ftrace_epilogue)
Steven Rostedte18eead32014-05-08 15:21:52 -0400177
178#ifdef CONFIG_FUNCTION_GRAPH_TRACER
179GLOBAL(ftrace_graph_call)
180 jmp ftrace_stub
181#endif
182
Steven Rostedt8329e812016-05-16 23:00:35 -0400183/* This is weak to keep gas from relaxing the jumps */
184WEAK(ftrace_stub)
Steven Rostedte18eead32014-05-08 15:21:52 -0400185 retq
186END(ftrace_caller)
187
188ENTRY(ftrace_regs_caller)
Steven Rostedt (Red Hat)527aa752014-11-24 13:06:05 -0500189 /* Save the current flags before any operations that can change them */
Steven Rostedte18eead32014-05-08 15:21:52 -0400190 pushfq
191
Steven Rostedt (Red Hat)527aa752014-11-24 13:06:05 -0500192 /* added 8 bytes to save flags */
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500193 save_mcount_regs 8
194 /* save_mcount_regs fills in first two parameters */
195
196GLOBAL(ftrace_regs_caller_op_ptr)
197 /* Load the ftrace_ops into the 3rd parameter */
198 movq function_trace_op(%rip), %rdx
Steven Rostedte18eead32014-05-08 15:21:52 -0400199
200 /* Save the rest of pt_regs */
201 movq %r15, R15(%rsp)
202 movq %r14, R14(%rsp)
203 movq %r13, R13(%rsp)
204 movq %r12, R12(%rsp)
205 movq %r11, R11(%rsp)
206 movq %r10, R10(%rsp)
Steven Rostedte18eead32014-05-08 15:21:52 -0400207 movq %rbx, RBX(%rsp)
208 /* Copy saved flags */
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -0500209 movq MCOUNT_REG_SIZE(%rsp), %rcx
Steven Rostedte18eead32014-05-08 15:21:52 -0400210 movq %rcx, EFLAGS(%rsp)
211 /* Kernel segments */
212 movq $__KERNEL_DS, %rcx
213 movq %rcx, SS(%rsp)
214 movq $__KERNEL_CS, %rcx
215 movq %rcx, CS(%rsp)
Steven Rostedt (Red Hat)527aa752014-11-24 13:06:05 -0500216 /* Stack - skipping return address and flags */
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -0500217 leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
Steven Rostedte18eead32014-05-08 15:21:52 -0400218 movq %rcx, RSP(%rsp)
219
220 /* regs go into 4th parameter */
221 leaq (%rsp), %rcx
222
223GLOBAL(ftrace_regs_call)
224 call ftrace_stub
225
226 /* Copy flags back to SS, to restore them */
227 movq EFLAGS(%rsp), %rax
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -0500228 movq %rax, MCOUNT_REG_SIZE(%rsp)
Steven Rostedte18eead32014-05-08 15:21:52 -0400229
230 /* Handlers can change the RIP */
231 movq RIP(%rsp), %rax
Steven Rostedt (Red Hat)85f6f022014-11-24 14:26:38 -0500232 movq %rax, MCOUNT_REG_SIZE+8(%rsp)
Steven Rostedte18eead32014-05-08 15:21:52 -0400233
234 /* restore the rest of pt_regs */
235 movq R15(%rsp), %r15
236 movq R14(%rsp), %r14
237 movq R13(%rsp), %r13
238 movq R12(%rsp), %r12
239 movq R10(%rsp), %r10
Steven Rostedte18eead32014-05-08 15:21:52 -0400240 movq RBX(%rsp), %rbx
241
Steven Rostedt (Red Hat)527aa752014-11-24 13:06:05 -0500242 restore_mcount_regs
Steven Rostedte18eead32014-05-08 15:21:52 -0400243
244 /* Restore flags */
245 popfq
246
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -0400247 /*
Borislav Petkovf1b92bb62016-02-16 09:43:21 +0100248 * As this jmp to ftrace_epilogue can be a short jump
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -0400249 * it must not be copied into the trampoline.
250 * The trampoline will add the code to jump
251 * to the return.
252 */
253GLOBAL(ftrace_regs_caller_end)
254
Borislav Petkovf1b92bb62016-02-16 09:43:21 +0100255 jmp ftrace_epilogue
Steven Rostedt (Red Hat)fdc841b2014-06-25 11:59:45 -0400256
Steven Rostedte18eead32014-05-08 15:21:52 -0400257END(ftrace_regs_caller)
258
259
260#else /* ! CONFIG_DYNAMIC_FTRACE */
261
262ENTRY(function_hook)
Steven Rostedte18eead32014-05-08 15:21:52 -0400263 cmpq $ftrace_stub, ftrace_trace_function
264 jnz trace
265
Steven Rostedt (Red Hat)62a207d2014-11-24 14:58:17 -0500266fgraph_trace:
Steven Rostedte18eead32014-05-08 15:21:52 -0400267#ifdef CONFIG_FUNCTION_GRAPH_TRACER
268 cmpq $ftrace_stub, ftrace_graph_return
269 jnz ftrace_graph_caller
270
271 cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
272 jnz ftrace_graph_caller
273#endif
274
275GLOBAL(ftrace_stub)
276 retq
277
278trace:
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500279 /* save_mcount_regs fills in first two parameters */
280 save_mcount_regs
Steven Rostedte18eead32014-05-08 15:21:52 -0400281
Namhyung Kim112677d2015-11-17 09:43:24 +0900282 /*
283 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
284 * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
285 * ip and parent ip are used and the list function is called when
286 * function tracing is enabled.
287 */
Steven Rostedte18eead32014-05-08 15:21:52 -0400288 call *ftrace_trace_function
289
Steven Rostedt (Red Hat)05df7102014-11-24 11:43:39 -0500290 restore_mcount_regs
Steven Rostedte18eead32014-05-08 15:21:52 -0400291
Steven Rostedt (Red Hat)62a207d2014-11-24 14:58:17 -0500292 jmp fgraph_trace
Steven Rostedte18eead32014-05-08 15:21:52 -0400293END(function_hook)
294#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedte18eead32014-05-08 15:21:52 -0400295
296#ifdef CONFIG_FUNCTION_GRAPH_TRACER
297ENTRY(ftrace_graph_caller)
Steven Rostedt (Red Hat)6a06bdb2014-11-24 21:00:34 -0500298 /* Saves rbp into %rdx and fills first parameter */
Steven Rostedt (Red Hat)05df7102014-11-24 11:43:39 -0500299 save_mcount_regs
Steven Rostedte18eead32014-05-08 15:21:52 -0400300
301#ifdef CC_USING_FENTRY
Steven Rostedt (Red Hat)6a06bdb2014-11-24 21:00:34 -0500302 leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
Steven Rostedte18eead32014-05-08 15:21:52 -0400303 movq $0, %rdx /* No framepointers needed */
304#else
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500305 /* Save address of the return address of traced function */
Steven Rostedt (Red Hat)6a06bdb2014-11-24 21:00:34 -0500306 leaq 8(%rdx), %rsi
Steven Rostedt (Red Hat)f1ab00a2014-11-24 21:38:40 -0500307 /* ftrace does sanity checks against frame pointers */
Steven Rostedt (Red Hat)0687c362014-11-24 18:08:48 -0500308 movq (%rdx), %rdx
Steven Rostedte18eead32014-05-08 15:21:52 -0400309#endif
Steven Rostedte18eead32014-05-08 15:21:52 -0400310 call prepare_ftrace_return
311
Steven Rostedt (Red Hat)05df7102014-11-24 11:43:39 -0500312 restore_mcount_regs
Steven Rostedte18eead32014-05-08 15:21:52 -0400313
314 retq
315END(ftrace_graph_caller)
316
317GLOBAL(return_to_handler)
318 subq $24, %rsp
319
320 /* Save the return values */
321 movq %rax, (%rsp)
322 movq %rdx, 8(%rsp)
323 movq %rbp, %rdi
324
325 call ftrace_return_to_handler
326
327 movq %rax, %rdi
328 movq 8(%rsp), %rdx
329 movq (%rsp), %rax
330 addq $24, %rsp
331 jmp *%rdi
332#endif