// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
#include <asm/patch.h>

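/*
 * Work descriptor handed to the stop_machine() callback: the address
 * to patch and the instruction to write there.
 */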
struct patch {
	void *addr;
	unsigned int insn;
};

#ifdef CONFIG_MMU
static DEFINE_RAW_SPINLOCK(patch_lock);

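/*
 * Map the page containing addr into the given text-poke fixmap slot so
 * it can be written even when kernel or module text is read-only.
 * Module text lives in vmalloc space and is resolved with
 * vmalloc_to_page(); core kernel text uses virt_to_page().  If neither
 * CONFIG_STRICT_MODULE_RWX nor CONFIG_STRICT_KERNEL_RWX applies, the
 * text is already writable and addr is returned unchanged.  When flags
 * is non-NULL, patch_lock is taken and later released by patch_unmap().
 */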
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
{
	unsigned int uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		page = virt_to_page(addr);
	else
		return addr;

	if (flags)
		raw_spin_lock_irqsave(&patch_lock, *flags);

	set_fixmap(fixmap, page_to_phys(page));

	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}

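/* Tear down the fixmap mapping and drop patch_lock if it was taken. */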
static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
	clear_fixmap(fixmap);

	if (flags)
		raw_spin_unlock_irqrestore(&patch_lock, *flags);
}
#else
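/* Without CONFIG_MMU the text is written in place; no remapping needed. */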
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
{
	return addr;
}
static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { }
#endif

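/*
 * Write one instruction at addr.  When remap is true the target is
 * written through a temporary fixmap alias so read-only text can be
 * patched.  Three cases are handled: a 16-bit Thumb instruction, a
 * 32-bit Thumb instruction stored as two halfwords (which may straddle
 * a page boundary), and a 32-bit ARM instruction.  The icache range is
 * flushed afterwards so the new instruction takes effect.
 */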
void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
{
	bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
	unsigned int uintaddr = (uintptr_t) addr;
	bool twopage = false;
	unsigned long flags;
	void *waddr = addr;
	int size;

	if (remap)
		waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);

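	/*
	 * Choose the store strategy from the instruction encoding and
	 * the alignment of the target address.
	 */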
	if (thumb2 && __opcode_is_thumb16(insn)) {
		*(u16 *)waddr = __opcode_to_mem_thumb16(insn);
		size = sizeof(u16);
	} else if (thumb2 && (uintaddr & 2)) {
		u16 first = __opcode_thumb32_first(insn);
		u16 second = __opcode_thumb32_second(insn);
		u16 *addrh0 = waddr;
		u16 *addrh1 = waddr + 2;

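		/*
		 * A 32-bit Thumb instruction at a halfword-aligned (but
		 * not word-aligned) address can straddle a page boundary;
		 * if so, map the second page through FIX_TEXT_POKE1.
		 */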
		twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
		if (twopage && remap)
			addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL);

		*addrh0 = __opcode_to_mem_thumb16(first);
		*addrh1 = __opcode_to_mem_thumb16(second);

		if (twopage && addrh1 != addr + 2) {
			flush_kernel_vmap_range(addrh1, 2);
			patch_unmap(FIX_TEXT_POKE1, NULL);
		}

		size = sizeof(u32);
	} else {
		if (thumb2)
			insn = __opcode_to_mem_thumb32(insn);
		else
			insn = __opcode_to_mem_arm(insn);

		*(u32 *)waddr = insn;
		size = sizeof(u32);
	}

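	/*
	 * If the write went through a fixmap alias, flush the alias and
	 * tear the mapping down.
	 */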
	if (waddr != addr) {
		flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
		patch_unmap(FIX_TEXT_POKE0, &flags);
	}

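	/* Make the new instruction visible to instruction fetch. */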
	flush_icache_range((uintptr_t)(addr),
			   (uintptr_t)(addr) + size);
}

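/*
 * stop_machine() callback: performs the actual write while the other
 * CPUs are parked, so none of them can execute the code mid-update.
 */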
static int __kprobes patch_text_stop_machine(void *data)
{
	struct patch *patch = data;

	__patch_text(patch->addr, patch->insn);

	return 0;
}

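/*
 * Replace the instruction at addr on a live system.  The write is
 * funnelled through stop_machine() so the update appears atomic to
 * all CPUs.
 */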
void __kprobes patch_text(void *addr, unsigned int insn)
{
	struct patch patch = {
		.addr = addr,
		.insn = insn,
	};

	stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}