// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/setup.h>

static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
			       unsigned int *patch_addr)
{
	int err = 0;

	__put_user_asm(instr, patch_addr, err, "stw");
	if (err)
		return err;

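	/*
	 * Push the modified instruction from the data cache out to
	 * memory, then invalidate the matching icache line so the CPU
	 * refetches the new instruction at exec_addr.
	 */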
	asm ("dcbst 0, %0; sync; icbi 0, %1; sync; isync" :: "r" (patch_addr),
							     "r" (exec_addr));

	return 0;
}

int raw_patch_instruction(unsigned int *addr, unsigned int instr)
{
	return __patch_instruction(addr, instr, addr);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);

static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
			cpu);
		return -1;
	}
	this_cpu_write(text_poke_area, area);

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(text_poke_area));
	return 0;
}

/*
 * Run as a late init call. This allows all the boot time patching to be done
 * simply by writing to the code directly; we are then called here prior to
 * mark_rodata_ro(), which happens after all init calls have run. Although
 * BUG_ON() is rude, it should only trigger on ENOMEM here, and we judge that
 * preferable to a kernel that will crash later when someone tries to use
 * patch_instruction().
 */
static int __init setup_text_poke_area(void)
{
	BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
		"powerpc/text_poke:online", text_area_cpu_up,
		text_area_cpu_down));

	return 0;
}
late_initcall(setup_text_poke_area);

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn;
	int err;

	if (is_vmalloc_addr(addr))
		pfn = vmalloc_to_pfn(addr);
	else
		pfn = __pa_symbol(addr) >> PAGE_SHIFT;

	err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);

	pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
	if (err)
		return -1;

	return 0;
}


static inline int unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (unlikely(!pgdp))
		return -EINVAL;

	pudp = pud_offset(pgdp, addr);
	if (unlikely(!pudp))
		return -EINVAL;

	pmdp = pmd_offset(pudp, addr);
	if (unlikely(!pmdp))
		return -EINVAL;

	ptep = pte_offset_kernel(pmdp, addr);
	if (unlikely(!ptep))
		return -EINVAL;

	pr_devel("clearing mm %p, pte %p, addr %lx\n", &init_mm, ptep, addr);

	/*
	 * In hash, pte_clear flushes the TLB; in radix, we have to
	 * flush it explicitly.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
	int err;
	unsigned int *patch_addr = NULL;
	unsigned long flags;
	unsigned long text_poke_addr;
	unsigned long kaddr = (unsigned long)addr;

	/*
	 * During early boot patch_instruction() is called before
	 * text_poke_area is ready, but we still need to allow patching,
	 * so we just do the plain old patching.
	 */
	if (!this_cpu_read(text_poke_area))
		return raw_patch_instruction(addr, instr);

	local_irq_save(flags);

	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
	if (map_patch_area(addr, text_poke_addr)) {
		err = -1;
		goto out;
	}

	patch_addr = (unsigned int *)(text_poke_addr) +
			((kaddr & ~PAGE_MASK) / sizeof(unsigned int));

	__patch_instruction(addr, instr, patch_addr);

	err = unmap_patch_area(text_poke_addr);
	if (err)
		pr_warn("failed to unmap %lx\n", text_poke_addr);

out:
	local_irq_restore(flags);

	return err;
}
#else /* !CONFIG_STRICT_KERNEL_RWX */

static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
	return raw_patch_instruction(addr, instr);
}

#endif /* CONFIG_STRICT_KERNEL_RWX */

int patch_instruction(unsigned int *addr, unsigned int instr)
{
	/* Make sure we aren't patching a freed init section */
	if (init_mem_is_free && init_section_contains(addr, 4)) {
		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
		return 0;
	}
	return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);
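
/*
 * Example usage (a sketch, not part of this file's API): nopping out an
 * instruction. 0x60000000 is "ori r0,r0,0", the canonical powerpc nop.
 *
 *	err = patch_instruction(addr, 0x60000000);
 *	if (err)
 *		pr_err("patching failed\n");
 */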

int patch_branch(unsigned int *addr, unsigned long target, int flags)
{
	unsigned int instr;

	create_branch(&instr, addr, target, flags);
	return patch_instruction(addr, instr);
}

bool is_offset_in_branch_range(long offset)
{
	/*
	 * The powerpc branch instruction is:
	 *
	 *   0         6                 30   31
	 *   +---------+----------------+---+---+
	 *   | opcode  |       LI       |AA |LK |
	 *   +---------+----------------+---+---+
	 *   where AA = 0 and LK = 0
	 *
	 * LI is a signed 24-bit integer. The real branch offset is computed
	 * by: imm32 = SignExtend(LI:'0b00', 32);
	 *
	 * So the maximum forward branch should be:
	 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
	 * and the maximum backward branch should be:
	 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
	 */
	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}
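
/*
 * For example (a sketch of the boundary cases derived above):
 *
 *	is_offset_in_branch_range(0x1fffffc);	// true, maximum forward
 *	is_offset_in_branch_range(-0x2000000);	// true, maximum backward
 *	is_offset_in_branch_range(0x2000000);	// false, out of range
 *	is_offset_in_branch_range(0x100002);	// false, not word aligned
 */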

/*
 * Helper to check if a given instruction is a conditional branch.
 * Derived from the conditional checks in analyse_instr().
 */
bool is_conditional_branch(unsigned int instr)
{
	unsigned int opcode = instr >> 26;

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);
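
/*
 * For instance (encodings from the ISA, shown as a sketch): 0x41820000
 * ("beq") is primary opcode 16, so is_conditional_branch() returns true;
 * 0x48000000 ("b .") is opcode 18, so it returns false.
 */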

int create_branch(unsigned int *instr,
		  const unsigned int *addr,
		  unsigned long target, int flags)
{
	long offset;

	*instr = 0;
	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC);

	return 0;
}
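
/*
 * For example, building a relative branch forward by 0x100 bytes (a
 * sketch; the constants follow from the encoding above):
 *
 *	unsigned int instr;
 *	int err;
 *
 *	err = create_branch(&instr, (unsigned int *)0x1000, 0x1100, 0);
 *	// err == 0, instr == 0x48000100, i.e. "b +0x100"
 */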

int create_cond_branch(unsigned int *instr, const unsigned int *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC);

	return 0;
}
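
/*
 * As a sketch, a "beq" to the same address can be built by passing the
 * BO/BI fields in flags: BO = 12 (branch if CR bit set), BI = 2 (cr0 "eq"):
 *
 *	err = create_cond_branch(&instr, addr, (unsigned long)addr,
 *				 0x01820000);
 *	// err == 0, instr == 0x41820000, i.e. "beq ."
 */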

static unsigned int branch_opcode(unsigned int instr)
{
	return (instr >> 26) & 0x3F;
}

static int instr_is_branch_iform(unsigned int instr)
{
	return branch_opcode(instr) == 18;
}

static int instr_is_branch_bform(unsigned int instr)
{
	return branch_opcode(instr) == 16;
}

int instr_is_relative_branch(unsigned int instr)
{
	if (instr & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(unsigned int instr)
{
	return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const unsigned int *instr)
{
	signed long imm;

	imm = *instr & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((*instr & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const unsigned int *instr)
{
	signed long imm;

	imm = *instr & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((*instr & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

unsigned long branch_target(const unsigned int *instr)
{
	if (instr_is_branch_iform(*instr))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(*instr))
		return branch_bform_target(instr);

	return 0;
}
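
/*
 * For example (a sketch): 0x4bfffff0 is "b -0x10", so for an instruction
 * word stored at address A, branch_target() sign extends the LI field and
 * returns A - 0x10. With BRANCH_ABSOLUTE set, the offset itself is the
 * target.
 */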

int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr)
{
	if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr))
		return branch_target(instr) == addr;

	return 0;
}

int translate_branch(unsigned int *instr, const unsigned int *dest,
		     const unsigned int *src)
{
	unsigned long target;

	target = branch_target(src);

	if (instr_is_branch_iform(*src))
		return create_branch(instr, dest, target, *src);
	else if (instr_is_branch_bform(*src))
		return create_cond_branch(instr, dest, target, *src);

	return 1;
}

#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr)
{
	extern unsigned int interrupt_base_book3e;
	unsigned int *ibase = &interrupt_base_book3e;

	/*
	 * Our exception vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace, which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one.
	 */
	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}
#endif

#ifdef CONFIG_CODE_PATCHING_SELFTEST

static void __init test_trampoline(void)
{
	asm ("nop;\n");
}

#define check(x)	\
	if (!(x)) printk("code-patching: test failed at line %d\n", __LINE__);

static void __init test_branch_iform(void)
{
	int err;
	unsigned int instr;
	unsigned long addr;

	addr = (unsigned long)&instr;

	/* The simplest case, branch to self, no flags */
	check(instr_is_branch_iform(0x48000000));
	/* All bits of target set, and flags */
	check(instr_is_branch_iform(0x4bffffff));
	/* High bit of opcode set, which is wrong */
	check(!instr_is_branch_iform(0xcbffffff));
	/* Middle bits of opcode set, which is wrong */
	check(!instr_is_branch_iform(0x7bffffff));

	/* Simplest case, branch to self with link */
	check(instr_is_branch_iform(0x48000001));
	/* All bits of targets set */
	check(instr_is_branch_iform(0x4bfffffd));
	/* Some bits of targets set */
	check(instr_is_branch_iform(0x4bff00fd));
	/* Must be a valid branch to start with */
	check(!instr_is_branch_iform(0x7bfffffd));

	/* Absolute branch to 0x100 */
	instr = 0x48000103;
	check(instr_is_branch_to_addr(&instr, 0x100));
	/* Absolute branch to 0x420fc */
	instr = 0x480420ff;
	check(instr_is_branch_to_addr(&instr, 0x420fc));
	/* Maximum positive relative branch, + 32 MB - 4B */
	instr = 0x49fffffc;
	check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
	/* Smallest negative relative branch, - 4B */
	instr = 0x4bfffffc;
	check(instr_is_branch_to_addr(&instr, addr - 4));
	/* Largest negative relative branch, - 32 MB */
	instr = 0x4a000000;
	check(instr_is_branch_to_addr(&instr, addr - 0x2000000));

	/* Branch to self, with link */
	err = create_branch(&instr, &instr, addr, BRANCH_SET_LINK);
	check(instr_is_branch_to_addr(&instr, addr));

	/* Branch to self - 0x100, with link */
	err = create_branch(&instr, &instr, addr - 0x100, BRANCH_SET_LINK);
	check(instr_is_branch_to_addr(&instr, addr - 0x100));

	/* Branch to self + 0x100, no link */
	err = create_branch(&instr, &instr, addr + 0x100, 0);
	check(instr_is_branch_to_addr(&instr, addr + 0x100));

	/* Maximum relative negative offset, - 32 MB */
	err = create_branch(&instr, &instr, addr - 0x2000000, BRANCH_SET_LINK);
	check(instr_is_branch_to_addr(&instr, addr - 0x2000000));

	/* Out of range relative negative offset, - 32 MB + 4 */
	err = create_branch(&instr, &instr, addr - 0x2000004, BRANCH_SET_LINK);
	check(err);

	/* Out of range relative positive offset, + 32 MB */
	err = create_branch(&instr, &instr, addr + 0x2000000, BRANCH_SET_LINK);
	check(err);

	/* Unaligned target */
	err = create_branch(&instr, &instr, addr + 3, BRANCH_SET_LINK);
	check(err);

	/* Check flags are masked correctly */
	err = create_branch(&instr, &instr, addr, 0xFFFFFFFC);
	check(instr_is_branch_to_addr(&instr, addr));
	check(instr == 0x48000000);
}

static void __init test_create_function_call(void)
{
	unsigned int *iptr;
	unsigned long dest;
	unsigned int instr;

	/* Check we can create a function call */
	iptr = (unsigned int *)ppc_function_entry(test_trampoline);
	dest = ppc_function_entry(test_create_function_call);
	create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
	patch_instruction(iptr, instr);
	check(instr_is_branch_to_addr(iptr, dest));
}

static void __init test_branch_bform(void)
{
	int err;
	unsigned long addr;
	unsigned int *iptr, instr, flags;

	iptr = &instr;
	addr = (unsigned long)iptr;

	/* The simplest case, branch to self, no flags */
	check(instr_is_branch_bform(0x40000000));
	/* All bits of target set, and flags */
	check(instr_is_branch_bform(0x43ffffff));
	/* High bit of opcode set, which is wrong */
	check(!instr_is_branch_bform(0xc3ffffff));
	/* Middle bits of opcode set, which is wrong */
	check(!instr_is_branch_bform(0x7bffffff));

	/* Absolute conditional branch to 0x100 */
	instr = 0x43ff0103;
	check(instr_is_branch_to_addr(&instr, 0x100));
	/* Absolute conditional branch to 0x20fc */
	instr = 0x43ff20ff;
	check(instr_is_branch_to_addr(&instr, 0x20fc));
	/* Maximum positive relative conditional branch, + 32 KB - 4B */
	instr = 0x43ff7ffc;
	check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
	/* Smallest negative relative conditional branch, - 4B */
	instr = 0x43fffffc;
	check(instr_is_branch_to_addr(&instr, addr - 4));
	/* Largest negative relative conditional branch, - 32 KB */
	instr = 0x43ff8000;
	check(instr_is_branch_to_addr(&instr, addr - 0x8000));

	/* All condition code bits set & link */
	flags = 0x3ff000 | BRANCH_SET_LINK;

	/* Branch to self */
	err = create_cond_branch(&instr, iptr, addr, flags);
	check(instr_is_branch_to_addr(&instr, addr));

	/* Branch to self - 0x100 */
	err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
	check(instr_is_branch_to_addr(&instr, addr - 0x100));

	/* Branch to self + 0x100 */
	err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
	check(instr_is_branch_to_addr(&instr, addr + 0x100));

	/* Maximum relative negative offset, - 32 KB */
	err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
	check(instr_is_branch_to_addr(&instr, addr - 0x8000));

	/* Out of range relative negative offset, - 32 KB + 4 */
	err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
	check(err);

	/* Out of range relative positive offset, + 32 KB */
	err = create_cond_branch(&instr, iptr, addr + 0x8000, flags);
	check(err);

	/* Unaligned target */
	err = create_cond_branch(&instr, iptr, addr + 3, flags);
	check(err);

	/* Check flags are masked correctly */
	err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
	check(instr_is_branch_to_addr(&instr, addr));
	check(instr == 0x43FF0000);
}

static void __init test_translate_branch(void)
{
	unsigned long addr;
	unsigned int *p, *q;
	unsigned int instr;
	void *buf;

	buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
	check(buf);
	if (!buf)
		return;

	/* Simple case, branch to self moved a little */
	p = buf;
	addr = (unsigned long)p;
	patch_branch(p, addr, 0);
	check(instr_is_branch_to_addr(p, addr));
	q = p + 1;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(q, addr));

	/* Maximum negative case, move b . to addr + 32 MB */
	p = buf;
	addr = (unsigned long)p;
	patch_branch(p, addr, 0);
	q = buf + 0x2000000;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x4a000000);

	/* Maximum positive case, move x to x - 32 MB + 4 */
	p = buf + 0x2000000;
	addr = (unsigned long)p;
	patch_branch(p, addr, 0);
	q = buf + 4;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x49fffffc);

	/* Jump to x + 16 MB moved to x + 20 MB */
	p = buf;
	addr = 0x1000000 + (unsigned long)buf;
	patch_branch(p, addr, BRANCH_SET_LINK);
	q = buf + 0x1400000;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));

	/* Jump to x + 16 MB moved to x - 16 MB + 4 */
	p = buf + 0x1000000;
	addr = 0x2000000 + (unsigned long)buf;
	patch_branch(p, addr, 0);
	q = buf + 4;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));

	/* Conditional branch tests */

	/* Simple case, branch to self moved a little */
	p = buf;
	addr = (unsigned long)p;
	create_cond_branch(&instr, p, addr, 0);
	patch_instruction(p, instr);
	check(instr_is_branch_to_addr(p, addr));
	q = p + 1;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(q, addr));

	/* Maximum negative case, move b . to addr + 32 KB */
	p = buf;
	addr = (unsigned long)p;
	create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
	patch_instruction(p, instr);
	q = buf + 0x8000;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x43ff8000);

	/* Maximum positive case, move x to x - 32 KB + 4 */
	p = buf + 0x8000;
	addr = (unsigned long)p;
	create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
	patch_instruction(p, instr);
	q = buf + 4;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x43ff7ffc);

	/* Jump to x + 12 KB moved to x + 20 KB */
	p = buf;
	addr = 0x3000 + (unsigned long)buf;
	create_cond_branch(&instr, p, addr, BRANCH_SET_LINK);
	patch_instruction(p, instr);
	q = buf + 0x5000;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));

	/* Jump to x + 8 KB moved to x - 8 KB + 4 */
	p = buf + 0x2000;
	addr = 0x4000 + (unsigned long)buf;
	create_cond_branch(&instr, p, addr, 0);
	patch_instruction(p, instr);
	q = buf + 4;
	translate_branch(&instr, q, p);
	patch_instruction(q, instr);
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));

	/* Free the buffer we were using */
	vfree(buf);
}

static int __init test_code_patching(void)
{
	printk(KERN_DEBUG "Running code patching self-tests ...\n");

	test_branch_iform();
	test_branch_bform();
	test_create_function_call();
	test_translate_branch();

	return 0;
}
late_initcall(test_code_patching);

#endif /* CONFIG_CODE_PATCHING_SELFTEST */