/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *              Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *      stg     %r14,8(%r15)            # offset 0
 *      larl    %r1,<&counter>          # offset 6
 *      brasl   %r14,_mcount            # offset 12
 *      lg      %r14,8(%r15)            # offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >    brasl   %r0,ftrace_caller       # offset 0
 *      larl    %r1,<&counter>          # offset 6
 *      brasl   %r14,_mcount            # offset 12
 *      lg      %r14,8(%r15)            # offset 18
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues after the mcount block.
 * The disabled ftrace code block looks like this:
 * >    jg      .+24                    # offset 0
 *      larl    %r1,<&counter>          # offset 6
 *      brasl   %r14,_mcount            # offset 12
 *      lg      %r14,8(%r15)            # offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
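/*
 * For reference, ftrace_make_call / ftrace_make_nop below rewrite only the
 * first 6-byte instruction via struct ftrace_insn. As a rough sketch (the
 * real definition lives in asm/ftrace.h and may differ), the layout is
 * assumed to be:
 *
 *      struct ftrace_insn {
 *              u16 opc;        opcode halfword, e.g. 0xc0f4 (jg) or 0xc005 (brasl %r0,...)
 *              s32 disp;       signed displacement, counted in halfwords
 *      } __packed;             6 bytes, matching the patched instruction
 *
 * so a nop is roughly { .opc = 0xc0f4, .disp = MCOUNT_INSN_SIZE / 2 } and a
 * call is roughly { .opc = 0xc005, .disp = (target - ip) / 2 }, where target
 * is ftrace_caller or, for module code, the ftrace_plt trampoline.
 */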

unsigned long ftrace_plt;

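/*
 * ftrace_modify_call() only needs to do something if the call target can
 * differ between records. On s390 this is assumed not to be the case:
 * ftrace_caller and ftrace_regs_caller are the same entry point, so
 * switching between old_addr and addr never requires a code change.
 */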
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        struct ftrace_insn insn;
        unsigned short op;
        void *from, *to;
        size_t size;

        ftrace_generate_nop_insn(&insn);
        size = sizeof(insn);
        from = &insn;
        to = (void *) rec->ip;
        if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
                return -EFAULT;
        /*
         * If we find a breakpoint instruction, a kprobe has been placed
         * at the beginning of the function. We write the constant
         * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
         * instruction so that the kprobes handler can execute a nop if it
         * reaches this breakpoint.
         */
        if (op == BREAKPOINT_INSTRUCTION) {
                size -= 2;
                from += 2;
                to += 2;
                insn.disp = KPROBE_ON_FTRACE_NOP;
        }
        if (probe_kernel_write(to, from, size))
                return -EPERM;
        return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        struct ftrace_insn insn;
        unsigned short op;
        void *from, *to;
        size_t size;

        ftrace_generate_call_insn(&insn, rec->ip);
        size = sizeof(insn);
        from = &insn;
        to = (void *) rec->ip;
        if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
                return -EFAULT;
        /*
         * If we find a breakpoint instruction, a kprobe has been placed
         * at the beginning of the function. We write the constant
         * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
         * instruction so that the kprobes handler can execute a brasl if it
         * reaches this breakpoint.
         */
        if (op == BREAKPOINT_INSTRUCTION) {
                size -= 2;
                from += 2;
                to += 2;
                insn.disp = KPROBE_ON_FTRACE_CALL;
        }
        if (probe_kernel_write(to, from, size))
                return -EPERM;
        return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        return 0;
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}

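/*
 * The ftrace_plt trampoline below loads the absolute address of
 * ftrace_caller and branches to it. It is used for code that presumably
 * cannot reach ftrace_caller with the signed 32-bit halfword displacement
 * of a brasl instruction, i.e. module code located more than +-4 GB away
 * from the kernel image.
 */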
static int __init ftrace_plt_init(void)
{
        unsigned int *ip;

        ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
        if (!ftrace_plt)
                panic("cannot allocate ftrace plt\n");
        ip = (unsigned int *) ftrace_plt;
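        /*
         * basr %r1,0 puts the address of the following lg into %r1 (offset 2
         * of the plt), lg %r1,10(%r1) then loads the 64-bit FTRACE_ADDR
         * literal stored at offset 12 (2 + displacement 10), and br %r1
         * branches to it.
         */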
        ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
        ip[1] = 0x100a0004;
        ip[2] = 0x07f10000;
        ip[3] = FTRACE_ADDR >> 32;
        ip[4] = FTRACE_ADDR & 0xffffffff;
        set_memory_ro(ftrace_plt, 1);
        return 0;
}
device_initcall(ftrace_plt_init);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
                                              unsigned long ip)
{
        struct ftrace_graph_ent trace;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
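        /*
         * ip is the return address into the traced function, i.e. it points
         * right after the 24-byte mcount block. Strip the addressing mode
         * bits and rewind by MCOUNT_INSN_SIZE so that trace.func reports
         * the start of that block (the function entry).
         */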
        ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
        trace.func = ip;
        trace.depth = current->curr_ret_stack + 1;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace))
                goto out;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
        parent = (unsigned long) return_to_handler;
out:
        return parent;
}

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition. To enable the
 * ftrace graph code block, we simply patch the mask field of the
 * instruction to zero and turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
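/*
 * Sketch of why patching byte 1 flips the condition mask: the branch is a
 * 6-byte brcl instruction, encoded roughly as
 *
 *      byte 0: 0xc0                  opcode
 *      byte 1: (mask << 4) | 0x4     condition mask and opcode extension
 *      bytes 2-5:                    signed halfword displacement
 *
 * so writing 0x04 (mask 0, never taken) or 0xf4 (mask 15, always taken) at
 * offset 1 switches between a nop and an unconditional jump.
 */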
int ftrace_enable_ftrace_graph_caller(void)
{
        u8 op = 0x04; /* set mask field to zero */

        return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

int ftrace_disable_ftrace_graph_caller(void)
{
        u8 op = 0xf4; /* set mask field to all ones */

        return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */