blob: 51d14fe5eb9a318349876eb376b359695770a0a2 [file] [log] [blame]
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
9
Heiko Carstens88dbd202009-06-12 10:26:46 +020010#include <linux/hardirq.h>
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020011#include <linux/uaccess.h>
12#include <linux/ftrace.h>
13#include <linux/kernel.h>
14#include <linux/types.h>
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +010015#include <linux/kprobes.h>
Heiko Carstens9bf12262009-06-12 10:26:47 +020016#include <trace/syscall.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010017#include <asm/asm-offsets.h>
Heiko Carstens63df41d62013-09-06 19:10:48 +020018#include "entry.h"
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020019
Heiko Carstens3d1e2202014-09-03 13:26:23 +020020void mcount_replace_code(void);
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020021void ftrace_disable_code(void);
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +010022void ftrace_enable_insn(void);
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020023
/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The complete mcount block initially gets replaced
 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
 * only patch the jg/lg instruction within the block.
 * Note: we do not patch the first instruction to an unconditional branch,
 * since that would break kprobes/jprobes. It is easier to leave the larl
 * instruction in and only modify the second instruction.
 * The enabled ftrace code block looks like this:
 *	larl	%r0,.+24		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 *	larl	%r0,.+24		# offset 0
 * >	jg	.+18			# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
/*
 * Instruction templates that ftrace_make_nop/ftrace_make_call copy over
 * the mcount call sites. The byte layout must match the block diagram in
 * the comment above exactly; do not reorder or reformat these instructions.
 */
asm(
	"	.align	4\n"
	"mcount_replace_code:\n"		/* 24-byte full replacement block */
	"	larl	%r0,0f\n"		/* r0 = return point behind block */
	"ftrace_disable_code:\n"		/* 6-byte "disabled" patch point */
	"	jg	0f\n"			/* skip the rest of the block */
	"	br	%r1\n"
	"	brcl	0,0\n"
	"	brc	0,0\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"			/* 6-byte "enabled" patch point */
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020068
Heiko Carstens3d1e2202014-09-03 13:26:23 +020069#define MCOUNT_BLOCK_SIZE 24
70#define MCOUNT_INSN_OFFSET 6
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +010071#define FTRACE_INSN_SIZE 6
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020072
/*
 * Intentional no-op on s390: the enabled code block loads the tracer
 * address indirectly from __LC_FTRACE_FUNC (see ftrace_enable_insn), so
 * switching from one tracer function to another requires no re-patching
 * of the call site.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020078
79int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
80 unsigned long addr)
81{
Heiko Carstens3d1e2202014-09-03 13:26:23 +020082 /* Initial replacement of the whole mcount block */
83 if (addr == MCOUNT_ADDR) {
84 if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
85 mcount_replace_code,
86 MCOUNT_BLOCK_SIZE))
87 return -EPERM;
88 return 0;
89 }
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +010090 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
91 MCOUNT_INSN_SIZE))
92 return -EPERM;
93 return 0;
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +020094}
95
96int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
97{
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +010098 if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
99 FTRACE_INSN_SIZE))
100 return -EPERM;
101 return 0;
Heiko Carstensdfd9f7a2009-06-12 10:26:44 +0200102}
103
/*
 * Intentional no-op on s390: the patched code loads the tracer function
 * pointer from __LC_FTRACE_FUNC at runtime (see ftrace_enable_insn), so
 * no instructions need to be rewritten when the tracer changes.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}
108
/* No architecture-specific dynamic ftrace initialization is needed. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
Heiko Carstens88dbd202009-06-12 10:26:46 +0200113
Heiko Carstens88dbd202009-06-12 10:26:46 +0200114#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Heiko Carstens88dbd202009-06-12 10:26:46 +0200115/*
116 * Hook the return address and push it in the stack of return addresses
117 * in current thread info.
118 */
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +0100119unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
120 unsigned long ip)
Heiko Carstens88dbd202009-06-12 10:26:46 +0200121{
122 struct ftrace_graph_ent trace;
123
Heiko Carstens88dbd202009-06-12 10:26:46 +0200124 if (unlikely(atomic_read(&current->tracing_graph_pause)))
125 goto out;
Heiko Carstensaca91202013-05-13 14:48:52 +0200126 ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
Heiko Carstens05e0baa2013-10-11 08:55:57 +0200127 trace.func = ip;
128 trace.depth = current->curr_ret_stack + 1;
129 /* Only trace if the calling function expects to. */
130 if (!ftrace_graph_entry(&trace))
131 goto out;
Steven Rostedt71e308a2009-06-18 12:45:08 -0400132 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
Heiko Carstens88dbd202009-06-12 10:26:46 +0200133 goto out;
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +0100134 parent = (unsigned long) return_to_handler;
Heiko Carstens88dbd202009-06-12 10:26:46 +0200135out:
136 return parent;
137}
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +0100138
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +0100139/*
140 * Patch the kernel code at ftrace_graph_caller location. The instruction
Heiko Carstens0cccdda2014-10-08 10:03:08 +0200141 * there is branch relative on condition. To enable the ftrace graph code
142 * block, we simply patch the mask field of the instruction to zero and
143 * turn the instruction into a nop.
144 * To disable the ftrace graph code the mask field will be patched to
145 * all ones, which turns the instruction into an unconditional branch.
Martin Schwidefsky4cc9bed2011-01-05 12:48:11 +0100146 */
Heiko Carstens2481a872014-08-15 12:33:46 +0200147int ftrace_enable_ftrace_graph_caller(void)
148{
Heiko Carstens0cccdda2014-10-08 10:03:08 +0200149 u8 op = 0x04; /* set mask field to zero */
Heiko Carstens2481a872014-08-15 12:33:46 +0200150
Heiko Carstens0cccdda2014-10-08 10:03:08 +0200151 return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
Heiko Carstens2481a872014-08-15 12:33:46 +0200152}
153
154int ftrace_disable_ftrace_graph_caller(void)
155{
Heiko Carstens0cccdda2014-10-08 10:03:08 +0200156 u8 op = 0xf4; /* set mask field to all ones */
Heiko Carstens2481a872014-08-15 12:33:46 +0200157
Heiko Carstens0cccdda2014-10-08 10:03:08 +0200158 return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
Heiko Carstens2481a872014-08-15 12:33:46 +0200159}
160
Heiko Carstens88dbd202009-06-12 10:26:46 +0200161#endif /* CONFIG_FUNCTION_GRAPH_TRACER */