/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  With data registers, R3 is the
 * only one we can blow away.  With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function.  And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there.  mmmm pie.
 */
23ENTRY(__mcount)
Mike Frysingeraebfef02010-01-22 07:35:20 -050024#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
25 /* optional micro optimization: return if stopped */
26 p1.l = _function_trace_stop;
27 p1.h = _function_trace_stop;
28 r3 = [p1];
29 cc = r3 == 0;
30 if ! cc jump _ftrace_stub (bp);
31#endif
32
Mike Frysinger1c873be2009-06-09 07:25:09 -040033 /* save third function arg early so we can do testing below */
34 [--sp] = r2;
35
36 /* load the function pointer to the tracer */
37 p0.l = _ftrace_trace_function;
38 p0.h = _ftrace_trace_function;
39 r3 = [p0];
40
41 /* optional micro optimization: don't call the stub tracer */
42 r2.l = _ftrace_stub;
43 r2.h = _ftrace_stub;
44 cc = r2 == r3;
45 if ! cc jump .Ldo_trace;
46
Mike Frysinger1ee76d72009-06-10 04:45:29 -040047#ifdef CONFIG_FUNCTION_GRAPH_TRACER
48 /* if the ftrace_graph_return function pointer is not set to
49 * the ftrace_stub entry, call prepare_ftrace_return().
50 */
51 p0.l = _ftrace_graph_return;
52 p0.h = _ftrace_graph_return;
53 r3 = [p0];
54 cc = r2 == r3;
55 if ! cc jump _ftrace_graph_caller;
56
57 /* similarly, if the ftrace_graph_entry function pointer is not
58 * set to the ftrace_graph_entry_stub entry, ...
59 */
60 p0.l = _ftrace_graph_entry;
61 p0.h = _ftrace_graph_entry;
62 r2.l = _ftrace_graph_entry_stub;
63 r2.h = _ftrace_graph_entry_stub;
64 r3 = [p0];
65 cc = r2 == r3;
66 if ! cc jump _ftrace_graph_caller;
67#endif
68
Mike Frysinger1c873be2009-06-09 07:25:09 -040069 r2 = [sp++];
70 rts;
71
72.Ldo_trace:
73
74 /* save first/second function arg and the return register */
75 [--sp] = r0;
76 [--sp] = r1;
77 [--sp] = rets;
78
79 /* setup the tracer function */
80 p0 = r3;
81
Yi Li5bf9cbe2009-09-15 09:24:31 +000082 /* function_trace_call(unsigned long ip, unsigned long parent_ip):
83 * ip: this point was called by ...
84 * parent_ip: ... this function
85 * the ip itself will need adjusting for the mcount call
Mike Frysinger1c873be2009-06-09 07:25:09 -040086 */
Yi Li5bf9cbe2009-09-15 09:24:31 +000087 r0 = rets;
88 r1 = [sp + 16]; /* skip the 4 local regs on stack */
89 r0 += -MCOUNT_INSN_SIZE;
Mike Frysinger1c873be2009-06-09 07:25:09 -040090
91 /* call the tracer */
92 call (p0);
93
94 /* restore state and get out of dodge */
Mike Frysinger1ee76d72009-06-10 04:45:29 -040095.Lfinish_trace:
Mike Frysinger1c873be2009-06-09 07:25:09 -040096 rets = [sp++];
97 r1 = [sp++];
98 r0 = [sp++];
99 r2 = [sp++];
100
101.globl _ftrace_stub
102_ftrace_stub:
103 rts;
104ENDPROC(__mcount)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc.  This is so
 * the prepare_ftrace_return() can hijack it temporarily for probing
 * purposes.
 */
112ENTRY(_ftrace_graph_caller)
113 /* save first/second function arg and the return register */
114 [--sp] = r0;
115 [--sp] = r1;
116 [--sp] = rets;
117
Mike Frysingerb73faf72010-01-22 07:59:32 -0500118 /* prepare_ftrace_return(parent, self_addr, frame_pointer) */
119 r0 = sp; /* unsigned long *parent */
120 r1 = rets; /* unsigned long self_addr */
121#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
122 r2 = fp; /* unsigned long frame_pointer */
123#endif
Yi Li5bf9cbe2009-09-15 09:24:31 +0000124 r0 += 16; /* skip the 4 local regs on stack */
Mike Frysinger1ee76d72009-06-10 04:45:29 -0400125 r1 += -MCOUNT_INSN_SIZE;
126 call _prepare_ftrace_return;
127
128 jump .Lfinish_trace;
129ENDPROC(_ftrace_graph_caller)

/* Undo the rewrite caused by ftrace_graph_caller().  The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
135ENTRY(_return_to_handler)
136 /* make sure original return values are saved */
137 [--sp] = p0;
138 [--sp] = r0;
139 [--sp] = r1;
140
141 /* get original return address */
Mike Frysingerb73faf72010-01-22 07:59:32 -0500142#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
143 r0 = fp; /* Blackfin is sane, so omit this */
144#endif
Mike Frysinger1ee76d72009-06-10 04:45:29 -0400145 call _ftrace_return_to_handler;
146 rets = r0;
147
148 /* anomaly 05000371 - make sure we have at least three instructions
149 * between rets setting and the return
150 */
151 r1 = [sp++];
152 r0 = [sp++];
153 p0 = [sp++];
154 rts;
155ENDPROC(_return_to_handler)
#endif