// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


static int kprobes_initialized;
/*
 * kprobe_table can be accessed by
 * - normal hlist traversal and RCU add/del while kprobe_mutex is held, or
 * - RCU hlist traversal with preemption disabled (breakpoint handlers).
 */
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

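/*
 * Default symbol resolution for probe addresses. This weak implementation
 * simply uses kallsyms; architectures may override it, for example where
 * function descriptors or other ABI details need special handling.
 */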
kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					    unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

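/*
 * Slot life cycle: a slot starts out SLOT_CLEAN, becomes SLOT_USED when
 * handed out by __get_insn_slot(), and is marked SLOT_DIRTY when freed
 * with 'dirty' set, so that collect_garbage_slots() can reclaim it after
 * an RCU grace period.
 */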
enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

void __weak *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

static void free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.sym = KPROBE_INSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc() so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;

	/* Record the perf ksymbol register event after adding the page */
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
			   PAGE_SIZE, false, c->sym);
out:
	mutex_unlock(&c->mutex);
	return slot;
}

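/*
 * Illustrative sketch (not part of this file): arch code normally reaches
 * this cache through the get_insn_slot()/free_insn_slot() wrappers
 * generated in <linux/kprobes.h>, copying the probed instruction into the
 * slot it gets back, along these lines:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *	...
 *	free_insn_slot(p->ainsn.insn, 0);
 */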
/* Return 1 if all garbage slots are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			/*
			 * Record perf ksymbol unregister event before removing
			 * the page.
			 */
			perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
					   (unsigned long)kip->insns, PAGE_SIZE, true,
					   kip->cache->sym);
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is still running on the garbage slots. */
	synchronize_rcu();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction slots.
 * This will be used for checking whether the address on a stack
 * is on a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym)
{
	struct kprobe_insn_page *kip;
	int ret = -ERANGE;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if ((*symnum)--)
			continue;
		strlcpy(sym, c->sym, KSYM_NAME_LEN);
		*type = 't';
		*value = (unsigned long)kip->insns;
		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
{
	return alloc_insn_page();
}

void __weak free_optinsn_page(void *page)
{
	free_insn_page(page);
}

/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_optinsn_page,
	.free = free_optinsn_page,
	.sym = KPROBE_OPTINSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist,
				 lockdep_is_held(&kprobe_mutex)) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
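/*
 * Illustrative sketch (an assumption, not code from this file): a
 * breakpoint handler typically resolves the trapping address to a probe
 * with preemption disabled, along the lines of:
 *
 *	struct kprobe *p = get_kprobe((void *)instruction_pointer(regs));
 *
 *	if (p && p->pre_handler && p->pre_handler(p, regs))
 *		return;		(the pre_handler consumed the event)
 */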

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all of the pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

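/*
 * A probe moves between the three lists below under kprobe_mutex:
 * optimizing_list waits for optimization, unoptimizing_list waits for
 * unoptimization, and freeing_list holds unused aggregators waiting to
 * be reclaimed after a quiescence period.
 */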
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
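/*
 * Delay (in jiffies) before the optimizer work runs, so that a burst of
 * (un)registrations is batched into a single pass.
 */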
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	lockdep_assert_held(&text_mutex);
	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held in both cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to
	 * lock text_mutex but stop_machine() cannot be done because
	 * online_cpus has been changed).
	 * To avoid this deadlock, the caller must have locked cpu-hotplug,
	 * preventing cpu-hotplug outside of text_mutex locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop on freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to origin */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all potentially
	 * preempted tasks have normally scheduled. Because an optprobe
	 * may modify multiple instructions, there is a chance that the Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 * Note that on a non-preemptive kernel, this is transparently converted
	 * to synchronize_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

static bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Kprobes with a post_handler can not be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is under unoptimizing. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/* On the unoptimizing/optimizing_list, op must have OPTIMIZED flag */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

Masami Hiramatsu55479f62014-04-17 17:17:54 +0900710static void force_unoptimize_kprobe(struct optimized_kprobe *op)
Masami Hiramatsu6274de42010-12-03 18:54:09 +0900711{
Thomas Gleixner2d1e38f2017-05-24 10:15:36 +0200712 lockdep_assert_cpus_held();
Masami Hiramatsu6274de42010-12-03 18:54:09 +0900713 arch_unoptimize_kprobe(op);
Masami Hiramatsuf66c0442019-11-27 14:57:04 +0900714 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
Masami Hiramatsuafd66252010-02-25 08:34:07 -0500715}
716
717/* Unoptimize a kprobe if p is optimized */
Masami Hiramatsu55479f62014-04-17 17:17:54 +0900718static void unoptimize_kprobe(struct kprobe *p, bool force)
Masami Hiramatsuafd66252010-02-25 08:34:07 -0500719{
720 struct optimized_kprobe *op;
721
Masami Hiramatsu6274de42010-12-03 18:54:09 +0900722 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
723 return; /* This is not an optprobe nor optimized */
724
725 op = container_of(p, struct optimized_kprobe, kp);
Masami Hiramatsue4add242020-01-07 23:42:24 +0900726 if (!kprobe_optimized(p))
727 return;
728
729 if (!list_empty(&op->list)) {
730 if (optprobe_queued_unopt(op)) {
731 /* Queued in unoptimizing queue */
732 if (force) {
733 /*
734 * Forcibly unoptimize the kprobe here, and queue it
735 * in the freeing list for release afterwards.
736 */
737 force_unoptimize_kprobe(op);
738 list_move(&op->list, &freeing_list);
739 }
740 } else {
741 /* Dequeue from the optimizing queue */
Masami Hiramatsuafd66252010-02-25 08:34:07 -0500742 list_del_init(&op->list);
Masami Hiramatsue4add242020-01-07 23:42:24 +0900743 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
Masami Hiramatsu6274de42010-12-03 18:54:09 +0900744 }
745 return;
746 }
747
Masami Hiramatsu6274de42010-12-03 18:54:09 +0900748 /* Optimized kprobe case */
Masami Hiramatsue4add242020-01-07 23:42:24 +0900749 if (force) {
Masami Hiramatsu6274de42010-12-03 18:54:09 +0900750 /* Forcibly update the code: this is a special case */
751 force_unoptimize_kprobe(op);
Masami Hiramatsue4add242020-01-07 23:42:24 +0900752 } else {
Masami Hiramatsu6274de42010-12-03 18:54:09 +0900753 list_add(&op->list, &unoptimizing_list);
754 kick_kprobe_optimizer();
Masami Hiramatsuafd66252010-02-25 08:34:07 -0500755 }
756}
757
/* Cancel unoptimizing for reusing */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	WARN_ON_ONCE(list_empty(&op->list));
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If it failed to set up optimizing, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
out:
	mutex_unlock(&kprobe_mutex);
}

#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
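/*
 * The handler above is what makes jump optimization switchable at runtime.
 * Assuming the usual registration of this handler under the 'debug' sysctl
 * directory (kernel/sysctl.c), it can be driven from userspace, e.g.:
 *
 *	# sysctl debug.kprobes-optimization=0	(fall back to breakpoints)
 *	# sysctl debug.kprobes-optimization=1	(re-enable jump optimization)
 */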
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If the optimized kprobe is NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
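/*
 * Two ftrace_ops are kept because a kprobe with a post_handler must
 * single-step, which requires changing the IP (FTRACE_OPS_FL_IPMODIFY),
 * while other probes only need the saved registers. Each ops carries its
 * own enable count below.
 */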

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;

/* Caller must lock kprobe_mutex */
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret = 0;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
		return ret;

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
			goto err_ftrace;
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe from
	 * registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

/* Caller must lock kprobe_mutex */
static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{
	int ret = 0;

	if (*cnt == 1) {
		ret = unregister_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
			return ret;
	}

	(*cnt)--;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
		  p->addr, ret);
	return ret;
}

static int disarm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __disarm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}

static inline int disarm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}
#endif

static int prepare_kprobe(struct kprobe *p)
{
	/* Must ensure p->addr is really on ftrace */
	if (kprobe_ftrace(p))
		return arch_prepare_kprobe_ftrace(p);

	return arch_prepare_kprobe(p);
}

/* Arm a kprobe with text_mutex */
static int arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/* Disarm a kprobe with text_mutex */
static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001155/*
1156 * Aggregate handlers for multiple kprobes support - these handlers
1157 * take care of invoking the individual kprobe handlers on p->list
1158 */
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001159static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001160{
1161 struct kprobe *kp;
1162
Ananth N Mavinakayanahalli3516a462005-11-07 01:00:13 -08001163 list_for_each_entry_rcu(kp, &p->list, list) {
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001164 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
Ananth N Mavinakayanahallie6584522005-11-07 01:00:07 -08001165 set_kprobe_instance(kp);
Prasanna S Panchamukhi8b0914e2005-06-23 00:09:41 -07001166 if (kp->pre_handler(kp, regs))
1167 return 1;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001168 }
Ananth N Mavinakayanahallie6584522005-11-07 01:00:07 -08001169 reset_kprobe_instance();
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001170 }
1171 return 0;
1172}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001173NOKPROBE_SYMBOL(aggr_pre_handler);
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001174
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001175static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1176 unsigned long flags)
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001177{
1178 struct kprobe *kp;
1179
Ananth N Mavinakayanahalli3516a462005-11-07 01:00:13 -08001180 list_for_each_entry_rcu(kp, &p->list, list) {
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001181 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
Ananth N Mavinakayanahallie6584522005-11-07 01:00:07 -08001182 set_kprobe_instance(kp);
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001183 kp->post_handler(kp, regs, flags);
Ananth N Mavinakayanahallie6584522005-11-07 01:00:07 -08001184 reset_kprobe_instance();
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001185 }
1186 }
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001187}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001188NOKPROBE_SYMBOL(aggr_post_handler);
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001189
Keshavamurthy Anil Sbf8d5c52005-12-12 00:37:34 -08001190/* Walks the list and increments the nmissed count for the multiprobe case */
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001191void kprobes_inc_nmissed_count(struct kprobe *p)
Keshavamurthy Anil Sbf8d5c52005-12-12 00:37:34 -08001192{
1193 struct kprobe *kp;
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001194 if (!kprobe_aggrprobe(p)) {
Keshavamurthy Anil Sbf8d5c52005-12-12 00:37:34 -08001195 p->nmissed++;
1196 } else {
1197 list_for_each_entry_rcu(kp, &p->list, list)
1198 kp->nmissed++;
1199 }
1200 return;
1201}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001202NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
Keshavamurthy Anil Sbf8d5c52005-12-12 00:37:34 -08001203
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001204static void free_rp_inst_rcu(struct rcu_head *head)
1205{
1206 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
1207
1208 if (refcount_dec_and_test(&ri->rph->ref))
1209 kfree(ri->rph);
1210 kfree(ri);
1211}
1212NOKPROBE_SYMBOL(free_rp_inst_rcu);
1213
Masami Hiramatsub3388172020-08-29 22:02:47 +09001214static void recycle_rp_inst(struct kretprobe_instance *ri)
Hien Nguyenb94cce92005-06-23 00:09:19 -07001215{
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001216 struct kretprobe *rp = get_kretprobe(ri);
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001217
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001218 if (likely(rp)) {
Peter Zijlstra6e426e02020-08-29 22:03:56 +09001219 freelist_add(&ri->freelist, &rp->freelist);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001220 } else
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001221 call_rcu(&ri->rcu, free_rp_inst_rcu);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001222}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001223NOKPROBE_SYMBOL(recycle_rp_inst);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001224
Masami Hiramatsu319f0ce2020-08-29 22:03:02 +09001225static struct kprobe kprobe_busy = {
Jiri Olsa9b38cc72020-05-12 17:03:18 +09001226 .addr = (void *) get_kprobe,
1227};
1228
1229void kprobe_busy_begin(void)
1230{
1231 struct kprobe_ctlblk *kcb;
1232
1233 preempt_disable();
1234 __this_cpu_write(current_kprobe, &kprobe_busy);
1235 kcb = get_kprobe_ctlblk();
1236 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1237}
1238
1239void kprobe_busy_end(void)
1240{
1241 __this_cpu_write(current_kprobe, NULL);
1242 preempt_enable();
1243}
1244
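/*
 * Illustrative sketch of the intended pairing for the helpers above
 * (kprobe_flush_task() below is the in-tree user): the pattern marks
 * this CPU as "inside a kprobe" so that nested probes are suppressed
 * while kretprobe bookkeeping is touched outside a probe context.
 */
#if 0	/* example only */
	kprobe_busy_begin();	/* sets current_kprobe, disables preemption */
	/* ... safely walk or recycle kretprobe instances here ... */
	kprobe_busy_end();	/* clears current_kprobe, re-enables preemption */
#endif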
Hien Nguyenb94cce92005-06-23 00:09:19 -07001245/*
bibo maoc6fd91f2006-03-26 01:38:20 -08001246 * This function is called from finish_task_switch when task tk becomes dead,
1247 * so that we can recycle any function-return probe instances associated
1248 * with this task. These left-over instances represent probed functions
1249 * that have been called but will never return.
Hien Nguyenb94cce92005-06-23 00:09:19 -07001250 */
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001251void kprobe_flush_task(struct task_struct *tk)
Hien Nguyenb94cce92005-06-23 00:09:19 -07001252{
bibo,mao62c27be2006-10-02 02:17:33 -07001253 struct kretprobe_instance *ri;
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001254 struct llist_node *node;
Rusty Lynch802eae72005-06-27 15:17:08 -07001255
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001256 /* Early boot, not yet initialized. */
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001257 if (unlikely(!kprobes_initialized))
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001258 return;
1259
Jiri Olsa9b38cc72020-05-12 17:03:18 +09001260 kprobe_busy_begin();
1261
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001262 node = __llist_del_all(&tk->kretprobe_instances);
1263 while (node) {
1264 ri = container_of(node, struct kretprobe_instance, llist);
1265 node = node->next;
1266
1267 recycle_rp_inst(ri);
bibo,mao62c27be2006-10-02 02:17:33 -07001268 }
Jiri Olsa9b38cc72020-05-12 17:03:18 +09001269
1270 kprobe_busy_end();
Hien Nguyenb94cce92005-06-23 00:09:19 -07001271}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001272NOKPROBE_SYMBOL(kprobe_flush_task);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001273
Hien Nguyenb94cce92005-06-23 00:09:19 -07001274static inline void free_rp_inst(struct kretprobe *rp)
1275{
1276 struct kretprobe_instance *ri;
Peter Zijlstra6e426e02020-08-29 22:03:56 +09001277 struct freelist_node *node;
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001278 int count = 0;
Christoph Hellwig4c4308c2007-05-08 00:34:14 -07001279
Peter Zijlstra6e426e02020-08-29 22:03:56 +09001280 node = rp->freelist.head;
1281 while (node) {
1282 ri = container_of(node, struct kretprobe_instance, freelist);
1283 node = node->next;
1284
Hien Nguyenb94cce92005-06-23 00:09:19 -07001285 kfree(ri);
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001286 count++;
1287 }
1288
1289 if (refcount_sub_and_test(count, &rp->rph->ref)) {
1290 kfree(rp->rph);
1291 rp->rph = NULL;
Hien Nguyenb94cce92005-06-23 00:09:19 -07001292 }
1293}
1294
Masami Hiramatsu059053a2018-06-20 01:10:27 +09001295/* Add the new probe to ap->list */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001296static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
Prasanna S Panchamukhi8b0914e2005-06-23 00:09:41 -07001297{
Masami Hiramatsu059053a2018-06-20 01:10:27 +09001298 if (p->post_handler)
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001299 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001300
Masami Hiramatsu059053a2018-06-20 01:10:27 +09001301 list_add_rcu(&p->list, &ap->list);
Masami Hiramatsub918e5e2009-04-06 19:00:58 -07001302 if (p->post_handler && !ap->post_handler)
1303 ap->post_handler = aggr_post_handler;
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001304
Prasanna S Panchamukhi8b0914e2005-06-23 00:09:41 -07001305 return 0;
1306}
1307
1308/*
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001309 * Fill in the required fields of the "manager kprobe". Replace the
1310 * earlier kprobe in the hlist with the manager kprobe.
1311 */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001312static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001313{
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001314 /* Copy p's insn slot to ap */
Prasanna S Panchamukhi8b0914e2005-06-23 00:09:41 -07001315 copy_kprobe(p, ap);
bibo, maoa9ad9652006-07-30 03:03:26 -07001316 flush_insn_slot(ap);
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001317 ap->addr = p->addr;
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001318 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001319 ap->pre_handler = aggr_pre_handler;
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001320	/* We don't care about the kprobe which has already gone. */
1321 if (p->post_handler && !kprobe_gone(p))
mao, bibo36721652006-06-26 00:25:22 -07001322 ap->post_handler = aggr_post_handler;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001323
1324 INIT_LIST_HEAD(&ap->list);
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001325 INIT_HLIST_NODE(&ap->hlist);
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001326
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001327 list_add_rcu(&p->list, &ap->list);
Keshavamurthy Anil Sadad0f32005-12-12 00:37:12 -08001328 hlist_replace_rcu(&p->hlist, &ap->hlist);
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001329}
1330
1331/*
1332 * This is the second or subsequent kprobe at the address - handle
1333 * the intricacies
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001334 */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001335static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001336{
1337 int ret = 0;
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001338 struct kprobe *ap = orig_p;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001339
Thomas Gleixner2d1e38f2017-05-24 10:15:36 +02001340 cpus_read_lock();
1341
Masami Hiramatsu25764282012-06-05 19:28:26 +09001342 /* For preparing optimization, jump_label_text_reserved() is called */
1343 jump_label_lock();
Masami Hiramatsu25764282012-06-05 19:28:26 +09001344 mutex_lock(&text_mutex);
1345
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001346 if (!kprobe_aggrprobe(orig_p)) {
1347 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1348 ap = alloc_aggr_kprobe(orig_p);
Masami Hiramatsu25764282012-06-05 19:28:26 +09001349 if (!ap) {
1350 ret = -ENOMEM;
1351 goto out;
1352 }
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001353 init_aggr_kprobe(ap, orig_p);
Masami Hiramatsu819319f2018-09-11 19:20:40 +09001354 } else if (kprobe_unused(ap)) {
Masami Hiramatsu0490cd12010-12-03 18:54:16 +09001355 /* This probe is going to die. Rescue it */
Masami Hiramatsu819319f2018-09-11 19:20:40 +09001356 ret = reuse_unused_kprobe(ap);
1357 if (ret)
1358 goto out;
1359 }
Masami Hiramatsub918e5e2009-04-06 19:00:58 -07001360
1361 if (kprobe_gone(ap)) {
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001362 /*
1363		 * Attempting to insert a new probe at the same location that
1364		 * had a probe in the module vaddr area which was already
1365		 * freed; the instruction slot has already been released.
1366		 * We need a new slot for the new probe.
1367 */
Masami Hiramatsub918e5e2009-04-06 19:00:58 -07001368 ret = arch_prepare_kprobe(ap);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001369 if (ret)
Masami Hiramatsub918e5e2009-04-06 19:00:58 -07001370 /*
1371			 * Even if we fail to allocate a new slot, we don't
1372			 * need to free the aggr_kprobe; it will be used next
1373			 * time, or freed by unregister_kprobe().
1374 */
Masami Hiramatsu25764282012-06-05 19:28:26 +09001375 goto out;
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001376
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001377 /* Prepare optimized instructions if possible. */
1378 prepare_optimized_kprobe(ap);
1379
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001380 /*
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001381 * Clear gone flag to prevent allocating new slot again, and
1382 * set disabled flag because it is not armed yet.
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001383 */
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001384 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1385 | KPROBE_FLAG_DISABLED;
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001386 }
Masami Hiramatsub918e5e2009-04-06 19:00:58 -07001387
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001388 /* Copy ap's insn slot to p */
Masami Hiramatsub918e5e2009-04-06 19:00:58 -07001389 copy_kprobe(ap, p);
Masami Hiramatsu25764282012-06-05 19:28:26 +09001390 ret = add_new_kprobe(ap, p);
1391
1392out:
1393 mutex_unlock(&text_mutex);
Masami Hiramatsu25764282012-06-05 19:28:26 +09001394 jump_label_unlock();
Thomas Gleixner2d1e38f2017-05-24 10:15:36 +02001395 cpus_read_unlock();
Masami Hiramatsu25764282012-06-05 19:28:26 +09001396
1397 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1398 ap->flags &= ~KPROBE_FLAG_DISABLED;
Jessica Yu12310e342018-01-10 00:51:23 +01001399 if (!kprobes_all_disarmed) {
Masami Hiramatsu25764282012-06-05 19:28:26 +09001400 /* Arm the breakpoint again. */
Jessica Yu12310e342018-01-10 00:51:23 +01001401 ret = arm_kprobe(ap);
1402 if (ret) {
1403 ap->flags |= KPROBE_FLAG_DISABLED;
1404 list_del_rcu(&p->list);
Paul E. McKenneyae8b7ce2018-11-06 19:04:39 -08001405 synchronize_rcu();
Jessica Yu12310e342018-01-10 00:51:23 +01001406 }
1407 }
Masami Hiramatsu25764282012-06-05 19:28:26 +09001408 }
1409 return ret;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001410}
1411
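/*
 * Illustrative sketch, assuming placeholder handlers pre_a/pre_b:
 * registering two kprobes at the same address. The second
 * register_kprobe() call reaches register_aggr_kprobe() above, which
 * converts the first probe into an aggrprobe; aggr_pre_handler() then
 * dispatches to both handlers on a hit.
 */
#if 0	/* example only */
	static struct kprobe kp_a = {
		.symbol_name	= "kernel_clone",
		.pre_handler	= pre_a,
	};
	static struct kprobe kp_b = {
		.symbol_name	= "kernel_clone",
		.pre_handler	= pre_b,
	};

	register_kprobe(&kp_a);	/* becomes a plain kprobe */
	register_kprobe(&kp_b);	/* kp_a and kp_b now hang off one aggrprobe */
#endif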
Masami Hiramatsube8f2742014-04-17 17:16:58 +09001412bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1413{
1414	/* The __kprobes-marked functions and entry code must not be probed. */
1415 return addr >= (unsigned long)__kprobes_text_start &&
1416 addr < (unsigned long)__kprobes_text_end;
1417}
1418
Masami Hiramatsu6143c6f2019-02-13 01:13:12 +09001419static bool __within_kprobe_blacklist(unsigned long addr)
Prasanna S Panchamukhid0aaff92005-09-06 15:19:26 -07001420{
Masami Hiramatsu376e2422014-04-17 17:17:05 +09001421 struct kprobe_blacklist_entry *ent;
Srinivasa Ds3d8d9962008-04-28 02:14:26 -07001422
Masami Hiramatsube8f2742014-04-17 17:16:58 +09001423 if (arch_within_kprobe_blacklist(addr))
Masami Hiramatsu376e2422014-04-17 17:17:05 +09001424 return true;
Srinivasa Ds3d8d9962008-04-28 02:14:26 -07001425 /*
1426	 * If a kprobe_blacklist exists, verify the address and
1427	 * fail any probe registration in a prohibited area.
1428 */
Masami Hiramatsu376e2422014-04-17 17:17:05 +09001429 list_for_each_entry(ent, &kprobe_blacklist, list) {
1430 if (addr >= ent->start_addr && addr < ent->end_addr)
1431 return true;
Srinivasa Ds3d8d9962008-04-28 02:14:26 -07001432 }
Masami Hiramatsu6143c6f2019-02-13 01:13:12 +09001433 return false;
1434}
Masami Hiramatsu376e2422014-04-17 17:17:05 +09001435
Masami Hiramatsu6143c6f2019-02-13 01:13:12 +09001436bool within_kprobe_blacklist(unsigned long addr)
1437{
1438 char symname[KSYM_NAME_LEN], *p;
1439
1440 if (__within_kprobe_blacklist(addr))
1441 return true;
1442
1443	/* Check if the address is on a suffixed symbol (e.g. foo.constprop.0) */
1444 if (!lookup_symbol_name(addr, symname)) {
1445 p = strchr(symname, '.');
1446 if (!p)
1447 return false;
1448 *p = '\0';
1449 addr = (unsigned long)kprobe_lookup_name(symname, 0);
1450 if (addr)
1451 return __within_kprobe_blacklist(addr);
1452 }
Masami Hiramatsu376e2422014-04-17 17:17:05 +09001453 return false;
Prasanna S Panchamukhid0aaff92005-09-06 15:19:26 -07001454}
1455
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001456/*
1457 * If we have a symbol_name argument, look it up and add the offset field
1458 * to it. This way, we can specify a relative address to a symbol.
Masami Hiramatsubc81d482011-06-27 16:26:50 +09001459 * This returns an encoded error if it fails to look up the symbol or if an
1460 * invalid combination of parameters is given.
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001461 */
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301462static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1463 const char *symbol_name, unsigned int offset)
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001464{
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301465 if ((symbol_name && addr) || (!symbol_name && !addr))
Masami Hiramatsubc81d482011-06-27 16:26:50 +09001466 goto invalid;
1467
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301468 if (symbol_name) {
Linus Torvalds7246f602017-05-05 11:36:44 -07001469 addr = kprobe_lookup_name(symbol_name, offset);
Masami Hiramatsubc81d482011-06-27 16:26:50 +09001470 if (!addr)
1471 return ERR_PTR(-ENOENT);
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001472 }
1473
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301474 addr = (kprobe_opcode_t *)(((char *)addr) + offset);
Masami Hiramatsubc81d482011-06-27 16:26:50 +09001475 if (addr)
1476 return addr;
1477
1478invalid:
1479 return ERR_PTR(-EINVAL);
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001480}
1481
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301482static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1483{
1484 return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1485}
1486
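/*
 * Illustrative sketch of the two valid ways to specify a probe point
 * ("kernel_clone" and some_text_address are placeholders): exactly one
 * of .addr and .symbol_name may be set, optionally combined with
 * .offset; setting both or neither makes _kprobe_addr() return -EINVAL.
 */
#if 0	/* example only */
	struct kprobe kp_by_symbol = {
		.symbol_name	= "kernel_clone",
		.offset		= 0x10,	/* added to the resolved address */
	};
	struct kprobe kp_by_addr = {
		.addr		= (kprobe_opcode_t *)some_text_address,
	};
#endif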
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301487/* Check that the passed kprobe is valid and return the kprobe from kprobe_table. */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001488static struct kprobe *__get_valid_kprobe(struct kprobe *p)
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301489{
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001490 struct kprobe *ap, *list_p;
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301491
Masami Hiramatsu7e6a71d2020-05-12 17:02:44 +09001492 lockdep_assert_held(&kprobe_mutex);
1493
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001494 ap = get_kprobe(p->addr);
1495 if (unlikely(!ap))
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301496 return NULL;
1497
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001498 if (p != ap) {
Masami Hiramatsu7e6a71d2020-05-12 17:02:44 +09001499 list_for_each_entry(list_p, &ap->list, list)
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301500 if (list_p == p)
1501 /* kprobe p is a valid probe */
1502 goto valid;
1503 return NULL;
1504 }
1505valid:
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001506 return ap;
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301507}
1508
Masami Hiramatsu33b1d142021-02-03 23:59:27 +09001509/*
1510 * Warn and return an error if the kprobe is being re-registered, since
1511 * that indicates a software bug.
1512 */
1513static inline int warn_kprobe_rereg(struct kprobe *p)
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301514{
1515 int ret = 0;
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301516
1517 mutex_lock(&kprobe_mutex);
Masami Hiramatsu33b1d142021-02-03 23:59:27 +09001518 if (WARN_ON_ONCE(__get_valid_kprobe(p)))
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301519 ret = -EINVAL;
1520 mutex_unlock(&kprobe_mutex);
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001521
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301522 return ret;
1523}
1524
Punit Agrawal4402dea2021-09-14 23:39:16 +09001525static int check_ftrace_location(struct kprobe *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526{
Masami Hiramatsuae6aa162012-06-05 19:28:32 +09001527 unsigned long ftrace_addr;
1528
Masami Hiramatsuae6aa162012-06-05 19:28:32 +09001529 ftrace_addr = ftrace_location((unsigned long)p->addr);
1530 if (ftrace_addr) {
Masami Hiramatsue7dbfe32012-09-28 17:15:20 +09001531#ifdef CONFIG_KPROBES_ON_FTRACE
Masami Hiramatsuae6aa162012-06-05 19:28:32 +09001532		/* The given address is not at an instruction boundary */
1533 if ((unsigned long)p->addr != ftrace_addr)
1534 return -EILSEQ;
Masami Hiramatsuae6aa162012-06-05 19:28:32 +09001535 p->flags |= KPROBE_FLAG_FTRACE;
Masami Hiramatsue7dbfe32012-09-28 17:15:20 +09001536#else /* !CONFIG_KPROBES_ON_FTRACE */
Masami Hiramatsuae6aa162012-06-05 19:28:32 +09001537 return -EINVAL;
1538#endif
1539 }
Heiko Carstensf7f242f2014-10-15 12:17:34 +02001540 return 0;
1541}
Masami Hiramatsuf7fa6ef02012-06-05 19:28:20 +09001542
Heiko Carstensf7f242f2014-10-15 12:17:34 +02001543static int check_kprobe_address_safe(struct kprobe *p,
1544 struct module **probed_mod)
1545{
1546 int ret;
1547
Punit Agrawal4402dea2021-09-14 23:39:16 +09001548 ret = check_ftrace_location(p);
Heiko Carstensf7f242f2014-10-15 12:17:34 +02001549 if (ret)
1550 return ret;
Masami Hiramatsuf7fa6ef02012-06-05 19:28:20 +09001551 jump_label_lock();
1552 preempt_disable();
1553
1554	/* Ensure the address is not in a reserved area and is within kernel text */
1555 if (!kernel_text_address((unsigned long) p->addr) ||
Masami Hiramatsu376e2422014-04-17 17:17:05 +09001556 within_kprobe_blacklist((unsigned long) p->addr) ||
Masami Hiramatsue336b402019-09-03 20:08:21 +09001557 jump_label_text_reserved(p->addr, p->addr) ||
Peter Zijlstrafa68bd02021-06-28 13:24:12 +02001558 static_call_text_reserved(p->addr, p->addr) ||
Masami Hiramatsue336b402019-09-03 20:08:21 +09001559 find_bug((unsigned long)p->addr)) {
Masami Hiramatsuf7fa6ef02012-06-05 19:28:20 +09001560 ret = -EINVAL;
1561 goto out;
1562 }
1563
1564	/* Check if we are probing a module */
1565 *probed_mod = __module_text_address((unsigned long) p->addr);
1566 if (*probed_mod) {
1567 /*
1568 * We must hold a refcount of the probed module while updating
1569 * its code to prohibit unexpected unloading.
1570 */
1571 if (unlikely(!try_module_get(*probed_mod))) {
1572 ret = -ENOENT;
1573 goto out;
1574 }
1575
1576 /*
1577		 * If the module has already freed .init.text, we can't
1578		 * insert kprobes there.
1579 */
1580 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1581 (*probed_mod)->state != MODULE_STATE_COMING) {
1582 module_put(*probed_mod);
1583 *probed_mod = NULL;
1584 ret = -ENOENT;
1585 }
1586 }
1587out:
1588 preempt_enable();
1589 jump_label_unlock();
1590
1591 return ret;
1592}
1593
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001594int register_kprobe(struct kprobe *p)
Masami Hiramatsuf7fa6ef02012-06-05 19:28:20 +09001595{
1596 int ret;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001597 struct kprobe *old_p;
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001598 struct module *probed_mod;
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001599 kprobe_opcode_t *addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600
Masami Hiramatsuf7fa6ef02012-06-05 19:28:20 +09001601 /* Adjust probe address from symbol */
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001602 addr = kprobe_addr(p);
Masami Hiramatsubc81d482011-06-27 16:26:50 +09001603 if (IS_ERR(addr))
1604 return PTR_ERR(addr);
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001605 p->addr = addr;
Ananth N Mavinakayanahalli3a872d82006-10-02 02:17:30 -07001606
Masami Hiramatsu33b1d142021-02-03 23:59:27 +09001607 ret = warn_kprobe_rereg(p);
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301608 if (ret)
1609 return ret;
1610
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001611 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1612 p->flags &= KPROBE_FLAG_DISABLED;
Ananth N Mavinakayanahalli3516a462005-11-07 01:00:13 -08001613 p->nmissed = 0;
Masami Hiramatsu98616682008-04-28 02:14:28 -07001614 INIT_LIST_HEAD(&p->list);
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001615
Masami Hiramatsuf7fa6ef02012-06-05 19:28:20 +09001616 ret = check_kprobe_address_safe(p, &probed_mod);
1617 if (ret)
1618 return ret;
1619
1620 mutex_lock(&kprobe_mutex);
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001621
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001622 old_p = get_kprobe(p->addr);
1623 if (old_p) {
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001624		/* Since this may unoptimize old_p, it locks text_mutex internally. */
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001625 ret = register_aggr_kprobe(old_p, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 goto out;
1627 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Thomas Gleixner2d1e38f2017-05-24 10:15:36 +02001629 cpus_read_lock();
1630 /* Prevent text modification */
1631 mutex_lock(&text_mutex);
Masami Hiramatsuae6aa162012-06-05 19:28:32 +09001632 ret = prepare_kprobe(p);
Masami Hiramatsu25764282012-06-05 19:28:26 +09001633 mutex_unlock(&text_mutex);
Thomas Gleixner2d1e38f2017-05-24 10:15:36 +02001634 cpus_read_unlock();
Christoph Hellwig6f716ac2007-05-08 00:34:13 -07001635 if (ret)
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001636 goto out;
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001637
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001638 INIT_HLIST_NODE(&p->hlist);
Ananth N Mavinakayanahalli3516a462005-11-07 01:00:13 -08001639 hlist_add_head_rcu(&p->hlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1641
Jessica Yu12310e342018-01-10 00:51:23 +01001642 if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1643 ret = arm_kprobe(p);
1644 if (ret) {
1645 hlist_del_rcu(&p->hlist);
Paul E. McKenneyae8b7ce2018-11-06 19:04:39 -08001646 synchronize_rcu();
Jessica Yu12310e342018-01-10 00:51:23 +01001647 goto out;
1648 }
1649 }
Christoph Hellwig74a0b572007-10-16 01:24:07 -07001650
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001651 /* Try to optimize kprobe */
1652 try_to_optimize_kprobe(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653out:
Ingo Molnar7a7d1cf2006-03-23 03:00:35 -08001654 mutex_unlock(&kprobe_mutex);
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001655
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001656 if (probed_mod)
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001657 module_put(probed_mod);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001658
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 return ret;
1660}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001661EXPORT_SYMBOL_GPL(register_kprobe);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
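/*
 * Illustrative sketch of minimal register_kprobe() usage from a module,
 * modeled on samples/kprobes; the probed symbol "kernel_clone" and all
 * example_* names are placeholders.
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit at %pS\n", p->addr);
	return 0;	/* 0: let the probed instruction run as usual */
}

static struct kprobe example_kp = {
	.symbol_name	= "kernel_clone",
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	/* Fails with e.g. -EINVAL for blacklisted or invalid addresses */
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif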
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001663/* Check if all probes on the aggrprobe are disabled */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001664static int aggr_kprobe_disabled(struct kprobe *ap)
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001665{
1666 struct kprobe *kp;
1667
Masami Hiramatsu7e6a71d2020-05-12 17:02:44 +09001668 lockdep_assert_held(&kprobe_mutex);
1669
1670 list_for_each_entry(kp, &ap->list, list)
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001671 if (!kprobe_disabled(kp))
1672 /*
1673 * There is an active probe on the list.
1674 * We can't disable this ap.
1675 */
1676 return 0;
1677
1678 return 1;
1679}
1680
1681/* Disable one kprobe: make sure it is called with kprobe_mutex held */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001682static struct kprobe *__disable_kprobe(struct kprobe *p)
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001683{
1684 struct kprobe *orig_p;
Jessica Yu297f9232018-01-10 00:51:24 +01001685 int ret;
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001686
1687	/* Get the original kprobe to return */
1688 orig_p = __get_valid_kprobe(p);
1689 if (unlikely(orig_p == NULL))
Jessica Yu297f9232018-01-10 00:51:24 +01001690 return ERR_PTR(-EINVAL);
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001691
1692 if (!kprobe_disabled(p)) {
1693 /* Disable probe if it is a child probe */
1694 if (p != orig_p)
1695 p->flags |= KPROBE_FLAG_DISABLED;
1696
1697 /* Try to disarm and disable this/parent probe */
1698 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
Wang Nan69d54b92015-02-13 14:40:26 -08001699 /*
1700 * If kprobes_all_disarmed is set, orig_p
1701 * should have already been disarmed, so
1702			 * skip the unneeded disarming process.
1703 */
Jessica Yu297f9232018-01-10 00:51:24 +01001704 if (!kprobes_all_disarmed) {
1705 ret = disarm_kprobe(orig_p, true);
1706 if (ret) {
1707 p->flags &= ~KPROBE_FLAG_DISABLED;
1708 return ERR_PTR(ret);
1709 }
1710 }
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001711 orig_p->flags |= KPROBE_FLAG_DISABLED;
1712 }
1713 }
1714
1715 return orig_p;
1716}
1717
Masami Hiramatsu98616682008-04-28 02:14:28 -07001718/*
1719 * Unregister a kprobe without scheduler synchronization.
1720 */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001721static int __unregister_kprobe_top(struct kprobe *p)
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001722{
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001723 struct kprobe *ap, *list_p;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001724
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001725 /* Disable kprobe. This will disarm it if needed. */
1726 ap = __disable_kprobe(p);
Jessica Yu297f9232018-01-10 00:51:24 +01001727 if (IS_ERR(ap))
1728 return PTR_ERR(ap);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001729
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001730 if (ap == p)
Ananth N Mavinakayanahallibf8f6e5b2007-05-08 00:34:16 -07001731 /*
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001732		 * This probe is an independent (and non-optimized) kprobe
1733		 * (not an aggrprobe). Remove it from the hash list.
Ananth N Mavinakayanahallibf8f6e5b2007-05-08 00:34:16 -07001734 */
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001735 goto disarmed;
1736
1737 /* Following process expects this probe is an aggrprobe */
1738 WARN_ON(!kprobe_aggrprobe(ap));
1739
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001740 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1741 /*
1742		 * !disarmed could happen if the probe is under delayed
1743 * unoptimizing.
1744 */
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001745 goto disarmed;
1746 else {
1747		/* If the disabled probe has special handlers, update the aggrprobe */
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001748 if (p->post_handler && !kprobe_gone(p)) {
Masami Hiramatsu7e6a71d2020-05-12 17:02:44 +09001749 list_for_each_entry(list_p, &ap->list, list) {
Masami Hiramatsu98616682008-04-28 02:14:28 -07001750 if ((list_p != p) && (list_p->post_handler))
1751 goto noclean;
1752 }
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001753 ap->post_handler = NULL;
Masami Hiramatsu98616682008-04-28 02:14:28 -07001754 }
1755noclean:
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001756 /*
1757 * Remove from the aggrprobe: this path will do nothing in
1758 * __unregister_kprobe_bottom().
1759 */
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001760 list_del_rcu(&p->list);
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001761 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1762 /*
1763			 * Try to optimize this probe again, because the post
1764			 * handler may have been changed.
1765 */
1766 optimize_kprobe(ap);
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001767 }
Masami Hiramatsu98616682008-04-28 02:14:28 -07001768 return 0;
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001769
1770disarmed:
1771 hlist_del_rcu(&ap->hlist);
1772 return 0;
Masami Hiramatsu98616682008-04-28 02:14:28 -07001773}
Mao, Bibob3e55c72005-12-12 00:37:00 -08001774
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001775static void __unregister_kprobe_bottom(struct kprobe *p)
Masami Hiramatsu98616682008-04-28 02:14:28 -07001776{
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001777 struct kprobe *ap;
Mao, Bibob3e55c72005-12-12 00:37:00 -08001778
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001779 if (list_empty(&p->list))
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001780 /* This is an independent kprobe */
Ananth N Mavinakayanahalli0498b632006-01-09 20:52:46 -08001781 arch_remove_kprobe(p);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001782 else if (list_is_singular(&p->list)) {
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001783 /* This is the last child of an aggrprobe */
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001784 ap = list_entry(p->list.next, struct kprobe, list);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001785 list_del(&p->list);
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001786 free_aggr_kprobe(ap);
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001787 }
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001788 /* Otherwise, do nothing. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789}
1790
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001791int register_kprobes(struct kprobe **kps, int num)
Masami Hiramatsu98616682008-04-28 02:14:28 -07001792{
1793 int i, ret = 0;
1794
1795 if (num <= 0)
1796 return -EINVAL;
1797 for (i = 0; i < num; i++) {
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001798 ret = register_kprobe(kps[i]);
Masami Hiramatsu67dddaa2008-06-12 15:21:35 -07001799 if (ret < 0) {
1800 if (i > 0)
1801 unregister_kprobes(kps, i);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001802 break;
1803 }
1804 }
1805 return ret;
1806}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001807EXPORT_SYMBOL_GPL(register_kprobes);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001808
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001809void unregister_kprobe(struct kprobe *p)
Masami Hiramatsu98616682008-04-28 02:14:28 -07001810{
1811 unregister_kprobes(&p, 1);
1812}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001813EXPORT_SYMBOL_GPL(unregister_kprobe);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001814
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001815void unregister_kprobes(struct kprobe **kps, int num)
Masami Hiramatsu98616682008-04-28 02:14:28 -07001816{
1817 int i;
1818
1819 if (num <= 0)
1820 return;
1821 mutex_lock(&kprobe_mutex);
1822 for (i = 0; i < num; i++)
1823 if (__unregister_kprobe_top(kps[i]) < 0)
1824 kps[i]->addr = NULL;
1825 mutex_unlock(&kprobe_mutex);
1826
Paul E. McKenneyae8b7ce2018-11-06 19:04:39 -08001827 synchronize_rcu();
Masami Hiramatsu98616682008-04-28 02:14:28 -07001828 for (i = 0; i < num; i++)
1829 if (kps[i]->addr)
1830 __unregister_kprobe_bottom(kps[i]);
1831}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001832EXPORT_SYMBOL_GPL(unregister_kprobes);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001833
Naveen N. Rao5f6bee32017-03-08 22:34:15 +05301834int __weak kprobe_exceptions_notify(struct notifier_block *self,
1835 unsigned long val, void *data)
Naveen N. Raofc62d022017-02-08 01:24:14 +05301836{
1837 return NOTIFY_DONE;
1838}
Naveen N. Rao5f6bee32017-03-08 22:34:15 +05301839NOKPROBE_SYMBOL(kprobe_exceptions_notify);
Naveen N. Raofc62d022017-02-08 01:24:14 +05301840
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841static struct notifier_block kprobe_exceptions_nb = {
1842 .notifier_call = kprobe_exceptions_notify,
Anil S Keshavamurthy3d5631e2006-06-26 00:25:28 -07001843 .priority = 0x7fffffff /* we need to be notified first */
1844};
1845
Michael Ellerman3d7e3382007-07-19 01:48:11 -07001846unsigned long __weak arch_deref_entry_point(void *entry)
1847{
1848 return (unsigned long)entry;
1849}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850
Ananth N Mavinakayanahalli9edddaa2008-03-04 14:28:37 -08001851#ifdef CONFIG_KRETPROBES
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001852
1853unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
1854 void *trampoline_address,
1855 void *frame_pointer)
1856{
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001857 kprobe_opcode_t *correct_ret_addr = NULL;
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001858 struct kretprobe_instance *ri = NULL;
1859 struct llist_node *first, *node;
1860 struct kretprobe *rp;
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001861
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001862 /* Find all nodes for this frame. */
1863 first = node = current->kretprobe_instances.first;
1864 while (node) {
1865 ri = container_of(node, struct kretprobe_instance, llist);
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001866
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001867 BUG_ON(ri->fp != frame_pointer);
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001868
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001869 if (ri->ret_addr != trampoline_address) {
1870 correct_ret_addr = ri->ret_addr;
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001871 /*
1872 * This is the real return address. Any other
1873 * instances associated with this task are for
1874			 * other calls deeper on the call stack.
1875 */
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001876 goto found;
1877 }
1878
1879 node = node->next;
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001880 }
Masami Hiramatsu9c89bb82021-09-14 23:39:25 +09001881	pr_err("kretprobe: Return address not found; not executing handler. There may be a bug in the kernel.\n");
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001882 BUG_ON(1);
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001883
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001884found:
1885 /* Unlink all nodes for this frame. */
1886 current->kretprobe_instances.first = node->next;
1887 node->next = NULL;
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001888
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001889	/* Run them. */
1890 while (first) {
1891 ri = container_of(first, struct kretprobe_instance, llist);
1892 first = first->next;
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001893
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001894 rp = get_kretprobe(ri);
1895 if (rp && rp->handler) {
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001896 struct kprobe *prev = kprobe_running();
1897
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001898 __this_cpu_write(current_kprobe, &rp->kp);
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001899 ri->ret_addr = correct_ret_addr;
Peter Zijlstrad741bf42020-08-29 22:03:24 +09001900 rp->handler(ri, regs);
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001901 __this_cpu_write(current_kprobe, prev);
1902 }
1903
Masami Hiramatsub3388172020-08-29 22:02:47 +09001904 recycle_rp_inst(ri);
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001905 }
1906
Masami Hiramatsu66ada2c2020-08-29 22:00:01 +09001907 return (unsigned long)correct_ret_addr;
1908}
1909NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
1910
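/*
 * Illustrative sketch of the arch-side contract, with placeholder names
 * (my_arch_*): the architecture's return trampoline saves pt_regs, calls
 * a C handler like this one, and resumes at the address returned by
 * __kretprobe_trampoline_handler(). The frame_pointer argument must
 * match what the arch recorded in arch_prepare_kretprobe(); x86, for
 * instance, uses the location of the saved stack pointer.
 */
#if 0	/* example only */
__used unsigned long my_arch_trampoline_handler(struct pt_regs *regs)
{
	return __kretprobe_trampoline_handler(regs,
			(void *)&my_arch_kretprobe_trampoline,
			(void *)&regs->sp);
}
#endif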
Adrian Bunke65cefe2006-02-03 03:03:42 -08001911/*
1912 * This kprobe pre_handler is registered with every kretprobe. When the
1913 * probe hits, it sets up the return probe.
1914 */
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001915static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
Adrian Bunke65cefe2006-02-03 03:03:42 -08001916{
1917 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001918 struct kretprobe_instance *ri;
Peter Zijlstra6e426e02020-08-29 22:03:56 +09001919 struct freelist_node *fn;
Adrian Bunke65cefe2006-02-03 03:03:42 -08001920
Peter Zijlstra6e426e02020-08-29 22:03:56 +09001921 fn = freelist_try_get(&rp->freelist);
1922 if (!fn) {
Christoph Hellwig4c4308c2007-05-08 00:34:14 -07001923 rp->nmissed++;
Peter Zijlstra6e426e02020-08-29 22:03:56 +09001924 return 0;
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001925 }
Peter Zijlstra6e426e02020-08-29 22:03:56 +09001926
1927 ri = container_of(fn, struct kretprobe_instance, freelist);
1928
1929 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1930 freelist_add(&ri->freelist, &rp->freelist);
1931 return 0;
1932 }
1933
1934 arch_prepare_kretprobe(ri, regs);
1935
1936 __llist_add(&ri->llist, &current->kretprobe_instances);
1937
Adrian Bunke65cefe2006-02-03 03:03:42 -08001938 return 0;
1939}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09001940NOKPROBE_SYMBOL(pre_handler_kretprobe);
Adrian Bunke65cefe2006-02-03 03:03:42 -08001941
Naveen N. Rao659b9572017-07-07 22:37:24 +05301942bool __weak arch_kprobe_on_func_entry(unsigned long offset)
Naveen N. Rao90ec5e82017-02-22 19:23:37 +05301943{
1944 return !offset;
1945}
1946
Masami Hiramatsu97c753e2021-01-28 00:37:51 +09001947/**
1948 * kprobe_on_func_entry() -- check whether given address is function entry
1949 * @addr: Target address
1950 * @sym: Target symbol name
1951 * @offset: The offset from the symbol or the address
1952 *
1953 * This checks whether the given @addr+@offset or @sym+@offset is on the
1954 * function entry address or not.
1955 * This returns 0 if it is the function entry, or -EINVAL if it is not.
1956 * It also returns -ENOENT if the symbol or address lookup fails.
1957 * The caller must pass @addr or @sym (exactly one of them must be NULL),
1958 * or this returns -EINVAL.
1959 */
1960int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301961{
1962 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1963
1964 if (IS_ERR(kp_addr))
Masami Hiramatsu97c753e2021-01-28 00:37:51 +09001965 return PTR_ERR(kp_addr);
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301966
Masami Hiramatsu97c753e2021-01-28 00:37:51 +09001967 if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
1968 return -ENOENT;
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301969
Masami Hiramatsu97c753e2021-01-28 00:37:51 +09001970 if (!arch_kprobe_on_func_entry(offset))
1971 return -EINVAL;
1972
1973 return 0;
Naveen N. Rao1d585e72017-03-08 13:56:06 +05301974}
1975
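/*
 * Illustrative sketch: callers such as register_kretprobe() below use
 * this to reject probe points that are not function entries. The symbol
 * name is a placeholder.
 */
#if 0	/* example only */
	if (kprobe_on_func_entry(NULL, "kernel_clone", 0) == 0)
		pr_info("kernel_clone+0x0 is a function entry\n");
#endif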
Masami Hiramatsu55479f62014-04-17 17:17:54 +09001976int register_kretprobe(struct kretprobe *rp)
Hien Nguyenb94cce92005-06-23 00:09:19 -07001977{
Masami Hiramatsu97c753e2021-01-28 00:37:51 +09001978 int ret;
Hien Nguyenb94cce92005-06-23 00:09:19 -07001979 struct kretprobe_instance *inst;
1980 int i;
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001981 void *addr;
Naveen N. Rao90ec5e82017-02-22 19:23:37 +05301982
Masami Hiramatsu97c753e2021-01-28 00:37:51 +09001983 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
1984 if (ret)
1985 return ret;
Masami Hiramatsuf438d912007-10-16 01:27:49 -07001986
Wang ShaoBo0188b872021-01-28 20:44:27 +08001987	/* If only rp->kp.addr is specified, check whether it is being re-registered */
Masami Hiramatsu33b1d142021-02-03 23:59:27 +09001988 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
Wang ShaoBo0188b872021-01-28 20:44:27 +08001989 return -EINVAL;
1990
Masami Hiramatsuf438d912007-10-16 01:27:49 -07001991 if (kretprobe_blacklist_size) {
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001992 addr = kprobe_addr(&rp->kp);
Masami Hiramatsubc81d482011-06-27 16:26:50 +09001993 if (IS_ERR(addr))
1994 return PTR_ERR(addr);
Masami Hiramatsuf438d912007-10-16 01:27:49 -07001995
1996 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1997 if (kretprobe_blacklist[i].addr == addr)
1998 return -EINVAL;
1999 }
2000 }
Hien Nguyenb94cce92005-06-23 00:09:19 -07002001
2002 rp->kp.pre_handler = pre_handler_kretprobe;
Ananth N Mavinakayanahalli7522a842006-04-20 02:43:11 -07002003 rp->kp.post_handler = NULL;
Hien Nguyenb94cce92005-06-23 00:09:19 -07002004
2005	/* Pre-allocate memory for the maximum number of kretprobe instances */
2006 if (rp->maxactive <= 0) {
Thomas Gleixner92616602019-07-26 23:19:41 +02002007#ifdef CONFIG_PREEMPTION
Heiko Carstensc2ef6662009-12-21 13:02:24 +01002008 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
Hien Nguyenb94cce92005-06-23 00:09:19 -07002009#else
Ananth N Mavinakayanahalli4dae5602009-10-30 19:23:10 +05302010 rp->maxactive = num_possible_cpus();
Hien Nguyenb94cce92005-06-23 00:09:19 -07002011#endif
2012 }
Peter Zijlstra6e426e02020-08-29 22:03:56 +09002013 rp->freelist.head = NULL;
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002014 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2015 if (!rp->rph)
2016 return -ENOMEM;
2017
2018 rp->rph->rp = rp;
Hien Nguyenb94cce92005-06-23 00:09:19 -07002019 for (i = 0; i < rp->maxactive; i++) {
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002020 inst = kzalloc(sizeof(struct kretprobe_instance) +
Abhishek Sagarf47cd9b2008-02-06 01:38:22 -08002021 rp->data_size, GFP_KERNEL);
Hien Nguyenb94cce92005-06-23 00:09:19 -07002022 if (inst == NULL) {
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002023 refcount_set(&rp->rph->ref, i);
Hien Nguyenb94cce92005-06-23 00:09:19 -07002024 free_rp_inst(rp);
2025 return -ENOMEM;
2026 }
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002027 inst->rph = rp->rph;
Peter Zijlstra6e426e02020-08-29 22:03:56 +09002028 freelist_add(&inst->freelist, &rp->freelist);
Hien Nguyenb94cce92005-06-23 00:09:19 -07002029 }
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002030 refcount_set(&rp->rph->ref, i);
Hien Nguyenb94cce92005-06-23 00:09:19 -07002031
2032 rp->nmissed = 0;
2033 /* Establish function entry probe point */
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08002034 ret = register_kprobe(&rp->kp);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002035 if (ret != 0)
Hien Nguyenb94cce92005-06-23 00:09:19 -07002036 free_rp_inst(rp);
2037 return ret;
2038}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002039EXPORT_SYMBOL_GPL(register_kretprobe);
Hien Nguyenb94cce92005-06-23 00:09:19 -07002040
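/*
 * Illustrative sketch of register_kretprobe() usage with per-instance
 * data, modeled on samples/kprobes; the probed symbol and the example_*
 * names are placeholders.
 */
#if 0	/* example only */
#include <linux/kprobes.h>
#include <linux/ktime.h>

struct example_data {
	ktime_t entry_stamp;
};

static int example_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct example_data *d = (struct example_data *)ri->data;

	d->entry_stamp = ktime_get();
	return 0;	/* non-zero would drop this instance (see pre_handler_kretprobe) */
}

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct example_data *d = (struct example_data *)ri->data;

	pr_info("returned after %lld ns\n",
		ktime_to_ns(ktime_sub(ktime_get(), d->entry_stamp)));
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "kernel_clone",
	.entry_handler	= example_entry,
	.handler	= example_ret,
	.data_size	= sizeof(struct example_data),
	.maxactive	= 20,	/* 0 picks the default computed above */
};

/* register_kretprobe(&example_rp) / unregister_kretprobe(&example_rp) */
#endif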
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002041int register_kretprobes(struct kretprobe **rps, int num)
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002042{
2043 int ret = 0, i;
2044
2045 if (num <= 0)
2046 return -EINVAL;
2047 for (i = 0; i < num; i++) {
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08002048 ret = register_kretprobe(rps[i]);
Masami Hiramatsu67dddaa2008-06-12 15:21:35 -07002049 if (ret < 0) {
2050 if (i > 0)
2051 unregister_kretprobes(rps, i);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002052 break;
2053 }
2054 }
2055 return ret;
2056}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002057EXPORT_SYMBOL_GPL(register_kretprobes);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002058
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002059void unregister_kretprobe(struct kretprobe *rp)
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002060{
2061 unregister_kretprobes(&rp, 1);
2062}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002063EXPORT_SYMBOL_GPL(unregister_kretprobe);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002064
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002065void unregister_kretprobes(struct kretprobe **rps, int num)
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002066{
2067 int i;
2068
2069 if (num <= 0)
2070 return;
2071 mutex_lock(&kprobe_mutex);
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002072 for (i = 0; i < num; i++) {
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002073 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2074 rps[i]->kp.addr = NULL;
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002075 rps[i]->rph->rp = NULL;
2076 }
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002077 mutex_unlock(&kprobe_mutex);
2078
Paul E. McKenneyae8b7ce2018-11-06 19:04:39 -08002079 synchronize_rcu();
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002080 for (i = 0; i < num; i++) {
2081 if (rps[i]->kp.addr) {
2082 __unregister_kprobe_bottom(&rps[i]->kp);
Peter Zijlstrad741bf42020-08-29 22:03:24 +09002083 free_rp_inst(rps[i]);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002084 }
2085 }
2086}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002087EXPORT_SYMBOL_GPL(unregister_kretprobes);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002088
Ananth N Mavinakayanahalli9edddaa2008-03-04 14:28:37 -08002089#else /* CONFIG_KRETPROBES */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002090int register_kretprobe(struct kretprobe *rp)
Hien Nguyenb94cce92005-06-23 00:09:19 -07002091{
2092 return -ENOSYS;
2093}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002094EXPORT_SYMBOL_GPL(register_kretprobe);
Hien Nguyenb94cce92005-06-23 00:09:19 -07002095
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002096int register_kretprobes(struct kretprobe **rps, int num)
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002097{
2098 return -ENOSYS;
2099}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002100EXPORT_SYMBOL_GPL(register_kretprobes);
2101
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002102void unregister_kretprobe(struct kretprobe *rp)
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002103{
2104}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002105EXPORT_SYMBOL_GPL(unregister_kretprobe);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002106
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002107void unregister_kretprobes(struct kretprobe **rps, int num)
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002108{
2109}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002110EXPORT_SYMBOL_GPL(unregister_kretprobes);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002111
Masami Hiramatsu820aede2014-04-17 17:18:21 +09002112static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
Srinivasa Ds346fd592007-02-20 13:57:54 -08002113{
2114 return 0;
2115}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09002116NOKPROBE_SYMBOL(pre_handler_kretprobe);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07002117
Ananth N Mavinakayanahalli9edddaa2008-03-04 14:28:37 -08002118#endif /* CONFIG_KRETPROBES */
Hien Nguyenb94cce92005-06-23 00:09:19 -07002119
Masami Hiramatsue8386a02009-01-06 14:41:52 -08002120/* Mark the kprobe as gone and remove its instruction buffer. */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002121static void kill_kprobe(struct kprobe *p)
Masami Hiramatsue8386a02009-01-06 14:41:52 -08002122{
2123 struct kprobe *kp;
Masami Hiramatsude5bd882009-04-06 19:01:02 -07002124
Masami Hiramatsu7e6a71d2020-05-12 17:02:44 +09002125 lockdep_assert_held(&kprobe_mutex);
2126
Masami Hiramatsue8386a02009-01-06 14:41:52 -08002127 p->flags |= KPROBE_FLAG_GONE;
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002128 if (kprobe_aggrprobe(p)) {
Masami Hiramatsue8386a02009-01-06 14:41:52 -08002129 /*
2130 * If this is an aggr_kprobe, we have to list all the
2131 * chained probes and mark them GONE.
2132 */
Masami Hiramatsu7e6a71d2020-05-12 17:02:44 +09002133 list_for_each_entry(kp, &p->list, list)
Masami Hiramatsue8386a02009-01-06 14:41:52 -08002134 kp->flags |= KPROBE_FLAG_GONE;
2135 p->post_handler = NULL;
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002136 kill_optimized_kprobe(p);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08002137 }
2138 /*
2139 * Here, we can remove insn_slot safely, because no thread calls
2140 * the original probed function (which will be freed soon) any more.
2141 */
2142 arch_remove_kprobe(p);
Muchun Song0cb2f132020-07-28 14:45:36 +08002143
2144 /*
2145 * The module is going away. We should disarm the kprobe which
Masami Hiramatsubcb53202020-09-01 00:12:07 +09002146	 * is using ftrace, because the ftrace framework is still available
2147	 * at the MODULE_STATE_GOING notification.
Muchun Song0cb2f132020-07-28 14:45:36 +08002148 */
Masami Hiramatsubcb53202020-09-01 00:12:07 +09002149 if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
Muchun Song0cb2f132020-07-28 14:45:36 +08002150 disarm_kprobe_ftrace(p);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08002151}
2152
Masami Hiramatsuc0614822010-04-27 18:33:12 -04002153/* Disable one kprobe */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002154int disable_kprobe(struct kprobe *kp)
Masami Hiramatsuc0614822010-04-27 18:33:12 -04002155{
2156 int ret = 0;
Jessica Yu297f9232018-01-10 00:51:24 +01002157 struct kprobe *p;
Masami Hiramatsuc0614822010-04-27 18:33:12 -04002158
2159 mutex_lock(&kprobe_mutex);
2160
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09002161 /* Disable this kprobe */
Jessica Yu297f9232018-01-10 00:51:24 +01002162 p = __disable_kprobe(kp);
2163 if (IS_ERR(p))
2164 ret = PTR_ERR(p);
Masami Hiramatsuc0614822010-04-27 18:33:12 -04002165
Masami Hiramatsuc0614822010-04-27 18:33:12 -04002166 mutex_unlock(&kprobe_mutex);
2167 return ret;
2168}
2169EXPORT_SYMBOL_GPL(disable_kprobe);
2170
2171/* Enable one kprobe */
Masami Hiramatsu55479f62014-04-17 17:17:54 +09002172int enable_kprobe(struct kprobe *kp)
Masami Hiramatsuc0614822010-04-27 18:33:12 -04002173{
2174 int ret = 0;
2175 struct kprobe *p;
2176
2177 mutex_lock(&kprobe_mutex);
2178
2179	/* Check whether the specified probe is valid. */
2180 p = __get_valid_kprobe(kp);
2181 if (unlikely(p == NULL)) {
2182 ret = -EINVAL;
2183 goto out;
2184 }
2185
2186 if (kprobe_gone(kp)) {
2187		/* This kprobe has gone; we can't enable it. */
2188 ret = -EINVAL;
2189 goto out;
2190 }
2191
2192 if (p != kp)
2193 kp->flags &= ~KPROBE_FLAG_DISABLED;
2194
2195 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2196 p->flags &= ~KPROBE_FLAG_DISABLED;
Jessica Yu12310e342018-01-10 00:51:23 +01002197 ret = arm_kprobe(p);
2198 if (ret)
2199 p->flags |= KPROBE_FLAG_DISABLED;
Masami Hiramatsuc0614822010-04-27 18:33:12 -04002200 }
2201out:
2202 mutex_unlock(&kprobe_mutex);
2203 return ret;
2204}
2205EXPORT_SYMBOL_GPL(enable_kprobe);
2206
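/*
 * Illustrative sketch: toggling an already-registered probe (example_kp
 * is a placeholder) without paying the registration cost each time.
 */
#if 0	/* example only */
	disable_kprobe(&example_kp);	/* handlers stop firing; probe stays registered */
	/* ... run the fast path unprobed ... */
	if (enable_kprobe(&example_kp))	/* -EINVAL if the probe is gone or invalid */
		pr_warn("could not re-enable probe\n");
#endif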
Masami Hiramatsu44585152018-04-28 21:36:33 +09002207/* Callers must NOT call this on the usual path; it is only for critical cases */
Masami Hiramatsu820aede2014-04-17 17:18:21 +09002208void dump_kprobe(struct kprobe *kp)
Frederic Weisbecker24851d22009-08-26 23:38:30 +02002209{
Masami Hiramatsu9c89bb82021-09-14 23:39:25 +09002210 pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
Masami Hiramatsu44585152018-04-28 21:36:33 +09002211 kp->symbol_name, kp->offset, kp->addr);
Frederic Weisbecker24851d22009-08-26 23:38:30 +02002212}
Masami Hiramatsu820aede2014-04-17 17:18:21 +09002213NOKPROBE_SYMBOL(dump_kprobe);
Frederic Weisbecker24851d22009-08-26 23:38:30 +02002214
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002215int kprobe_add_ksym_blacklist(unsigned long entry)
2216{
2217 struct kprobe_blacklist_entry *ent;
2218 unsigned long offset = 0, size = 0;
2219
2220 if (!kernel_text_address(entry) ||
2221 !kallsyms_lookup_size_offset(entry, &size, &offset))
2222 return -EINVAL;
2223
2224 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2225 if (!ent)
2226 return -ENOMEM;
2227 ent->start_addr = entry;
2228 ent->end_addr = entry + size;
2229 INIT_LIST_HEAD(&ent->list);
2230 list_add_tail(&ent->list, &kprobe_blacklist);
2231
2232 return (int)size;
2233}
2234
2235/* Add all symbols in the given area to the kprobe blacklist */
2236int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2237{
2238 unsigned long entry;
2239 int ret = 0;
2240
2241 for (entry = start; entry < end; entry += ret) {
2242 ret = kprobe_add_ksym_blacklist(entry);
2243 if (ret < 0)
2244 return ret;
2245 if (ret == 0) /* In case of alias symbol */
2246 ret = 1;
2247 }
2248 return 0;
2249}
2250
Masami Hiramatsu1e6769b2020-03-26 23:49:48 +09002251/* Remove all symbols in the given area from the kprobe blacklist */
2252static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2253{
2254 struct kprobe_blacklist_entry *ent, *n;
2255
2256 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2257 if (ent->start_addr < start || ent->start_addr >= end)
2258 continue;
2259 list_del(&ent->list);
2260 kfree(ent);
2261 }
2262}
2263
Masami Hiramatsu16db6262020-03-26 23:50:00 +09002264static void kprobe_remove_ksym_blacklist(unsigned long entry)
2265{
2266 kprobe_remove_area_blacklist(entry, entry + 1);
2267}
2268
Adrian Hunterd002b8b2020-05-28 11:00:58 +03002269int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2270 char *type, char *sym)
2271{
2272 return -ERANGE;
2273}
2274
2275int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2276 char *sym)
2277{
2278#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2279 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2280 return 0;
2281#ifdef CONFIG_OPTPROBES
2282 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2283 return 0;
2284#endif
2285#endif
2286 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2287 return 0;
2288 return -ERANGE;
2289}
2290
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002291int __init __weak arch_populate_kprobe_blacklist(void)
2292{
2293 return 0;
2294}
2295
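/*
 * Illustrative sketch of an architecture override for the weak function
 * above, modeled on what arch code typically does: blacklist the arch's
 * own unprobeable ranges, assuming the entry-text section symbols exist
 * on that architecture.
 */
#if 0	/* example only */
int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}
#endif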
Masami Hiramatsu376e2422014-04-17 17:17:05 +09002296/*
2297 * Look up and populate the kprobe_blacklist.
2298 *
2299 * Unlike the kretprobe blacklist, we'll need to determine
2300 * the range of addresses that belong to the said functions,
2301 * since a kprobe need not necessarily be at the beginning
2302 * of a function.
2303 */
2304static int __init populate_kprobe_blacklist(unsigned long *start,
2305 unsigned long *end)
2306{
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002307 unsigned long entry;
Masami Hiramatsu376e2422014-04-17 17:17:05 +09002308 unsigned long *iter;
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002309 int ret;
Masami Hiramatsu376e2422014-04-17 17:17:05 +09002310
2311 for (iter = start; iter < end; iter++) {
Masami Hiramatsud81b4252014-07-17 11:44:11 +00002312 entry = arch_deref_entry_point((void *)*iter);
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002313 ret = kprobe_add_ksym_blacklist(entry);
2314 if (ret == -EINVAL)
Masami Hiramatsu376e2422014-04-17 17:17:05 +09002315 continue;
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002316 if (ret < 0)
2317 return ret;
Masami Hiramatsu376e2422014-04-17 17:17:05 +09002318 }
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002319
2320 /* Symbols in __kprobes_text are blacklisted */
2321 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2322 (unsigned long)__kprobes_text_end);
Thomas Gleixner66e9b072020-03-10 14:04:34 +01002323 if (ret)
2324 return ret;
2325
2326 /* Symbols in noinstr section are blacklisted */
2327 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2328 (unsigned long)__noinstr_text_end);
Masami Hiramatsufb1a59f2018-12-17 17:20:55 +09002329
2330 return ret ? : arch_populate_kprobe_blacklist();
Masami Hiramatsu376e2422014-04-17 17:17:05 +09002331}
2332
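/*
 * Blacklist the symbols a module explicitly lists plus its
 * .kprobes.text and .noinstr.text sections, when present.
 */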
static void add_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_add_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_add_area_blacklist(start, end);
	}
}

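/* Undo add_module_kprobe_blacklist() when a module is removed */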
static void remove_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_remove_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_remove_area_blacklist(start, end);
	}
}

/* Module notifier callback, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val == MODULE_STATE_COMING) {
		mutex_lock(&kprobe_mutex);
		add_module_kprobe_blacklist(mod);
		mutex_unlock(&kprobe_mutex);
	}
	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections are about to be freed. When MODULE_STATE_LIVE
	 * is notified, only the .init.text section is freed. We need to
	 * disable any kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 *
				 * Note, this will also move any optimized
				 * probes that are pending removal from their
				 * corresponding lists to the freeing_list,
				 * so they will not be touched by the delayed
				 * kprobe_optimizer work handler.
				 */
				kill_kprobe(p);
			}
	}
	if (val == MODULE_STATE_GOING)
		remove_module_kprobe_blacklist(mod);
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

/* Markers of _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

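/*
 * Kill all kprobes placed in the kernel's init text, i.e. in
 * [__init_begin, __init_end); meant to be called when initmem is freed.
 */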
void kprobe_free_init_mem(void)
{
	void *start = (void *)(&__init_begin);
	void *end = (void *)(&__init_end);
	struct hlist_head *head;
	struct kprobe *p;
	int i;

	mutex_lock(&kprobe_mutex);

	/* Kill all kprobes on initmem */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist) {
			if (start <= (void *)p->addr && (void *)p->addr < end)
				kill_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
}

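/*
 * Core initialization: set up the hash table, populate the blacklist,
 * resolve the kretprobe blacklist symbols, and register the die and
 * module notifiers. Runs as an early initcall.
 */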
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&kprobe_table[i]);

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
	}

	if (kretprobe_blacklist_size) {
		/* Look up the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kretprobe_blacklist[i].addr =
				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
			if (!kretprobe_blacklist[i].addr)
				pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots for allocation */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
early_initcall(init_kprobes);

#if defined(CONFIG_OPTPROBES)
static int __init init_optprobes(void)
{
	/*
	 * Enable kprobe optimization - this kicks the optimizer, which
	 * depends on synchronize_rcu_tasks() and ksoftirqd; those are not
	 * available at early_initcall time, so delay the optimization.
	 */
	optimize_all_kprobes();

	return 0;
}
subsys_initcall(init_optprobes);
#endif

#ifdef CONFIG_DEBUG_FS
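/*
 * Emit one line of the debugfs 'list' file: address (NULL unless the
 * reader may see kernel addresses), probe type ('r' for kretprobes,
 * 'k' otherwise), symbol+offset, and status flags.
 */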
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;
	void *addr = p->addr;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else
		kprobe_type = "k";

	if (!kallsyms_show_value(pi->file->f_cred))
		addr = NULL;

	if (sym)
		seq_printf(pi, "%px %s %s+0x%x %s ",
			addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else	/* try to use %pS */
		seq_printf(pi, "%px %s %pS ",
			addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}

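/* The seq_file position is simply the kprobe hash table bucket index. */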
static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

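/*
 * Show every probe in one hash bucket; aggregated probes are expanded
 * so that each attached kprobe gets its own line.
 */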
static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_sops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

DEFINE_SEQ_ATTRIBUTE(kprobes);

/* kprobes/blacklist -- shows which functions cannot be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&kprobe_mutex);
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	/*
	 * If /proc/kallsyms is not showing kernel addresses, we won't
	 * show them here either.
	 */
	if (!kallsyms_show_value(m->file->f_cred))
		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
			   (void *)ent->start_addr);
	else
		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
			   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
{
	mutex_unlock(&kprobe_mutex);
}

static const struct seq_operations kprobe_blacklist_sops = {
	.start = kprobe_blacklist_seq_start,
	.next = kprobe_blacklist_seq_next,
	.stop = kprobe_blacklist_seq_stop,
	.show = kprobe_blacklist_seq_show,
};
DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);

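/*
 * Arm every registered kprobe that is not individually disabled, on a
 * best-effort basis: arming continues past failures, the failure count
 * is reported, and the last error (if any) is returned.
 */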
static int arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arm_kprobe.
	 */
	kprobes_all_disarmed = false;
	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		/* Arm all kprobes on a best-effort basis */
		hlist_for_each_entry(p, head, hlist) {
			if (!kprobe_disabled(p)) {
				err = arm_kprobe(p);
				if (err) {
					errors++;
					ret = err;
				}
				total++;
			}
		}
	}

	if (errors)
		pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
			errors, total);
	else
		pr_info("Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return ret;
}

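/*
 * Disarm every armed kprobe except arch trampoline probes, again on a
 * best-effort basis, then wait for the optimizer to finish reclaiming.
 */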
static int disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return 0;
	}

	kprobes_all_disarmed = true;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		/* Disarm all kprobes on a best-effort basis */
		hlist_for_each_entry(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
				err = disarm_kprobe(p, false);
				if (err) {
					errors++;
					ret = err;
				}
				total++;
			}
		}
	}

	if (errors)
		pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
			errors, total);
	else
		pr_info("Kprobes globally disabled\n");

	mutex_unlock(&kprobe_mutex);

	/* Wait for the optimizer to finish disarming all kprobes */
	wait_for_kprobe_optimizer();

	return ret;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	bool enable;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &enable);
	if (ret)
		return ret;

	ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
	if (ret)
		return ret;

	return count;
}

static const struct file_operations fops_kp = {
	.read = read_enabled_file_bool,
	.write = write_enabled_file_bool,
	.llseek = default_llseek,
};

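/*
 * Create the debugfs files 'list', 'enabled' and 'blacklist' (under
 * /sys/kernel/debug/kprobes/ with the usual debugfs mount point).
 * 'enabled' takes any boolean string accepted by kstrtobool() - e.g.
 * writing "0" (or "off") disarms all kprobes, "1" (or "on") re-arms
 * them.
 */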
static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir;

	dir = debugfs_create_dir("kprobes", NULL);

	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);

	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);

	debugfs_create_file("blacklist", 0400, dir, NULL,
			    &kprobe_blacklist_fops);

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */