blob: 8d772bd6894da1c9dc33b0893f1ef1837ece70b2 [file] [log] [blame]
Thomas Gleixner1a59d1b82019-05-27 08:55:05 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -04002/*
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04003 * Copyright (C) 2008-2014 Mathieu Desnoyers
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -04004 */
5#include <linux/module.h>
6#include <linux/mutex.h>
7#include <linux/types.h>
8#include <linux/jhash.h>
9#include <linux/list.h>
10#include <linux/rcupdate.h>
11#include <linux/tracepoint.h>
12#include <linux/err.h>
13#include <linux/slab.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010014#include <linux/sched/signal.h>
Ingo Molnar29930022017-02-08 18:51:36 +010015#include <linux/sched/task.h>
Ingo Molnarc5905af2012-02-24 08:31:31 +010016#include <linux/static_key.h>
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040017
/* Probe-count state of a tracepoint's callback array (see nr_func_state()). */
enum tp_func_state {
	TP_FUNC_0,	/* No probe registered. */
	TP_FUNC_1,	/* Exactly one probe. */
	TP_FUNC_2,	/* Exactly two probes. */
	TP_FUNC_N,	/* Three or more probes. */
};
24
Mathieu Desnoyers9c0be3f2018-10-13 15:10:50 -040025extern tracepoint_ptr_t __start___tracepoints_ptrs[];
26extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040027
Joel Fernandes (Google)e6753f22018-07-30 15:24:22 -070028DEFINE_SRCU(tracepoint_srcu);
29EXPORT_SYMBOL_GPL(tracepoint_srcu);
30
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040031/* Set to 1 to enable tracepoint debug output */
32static const int tracepoint_debug;
33
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -040034#ifdef CONFIG_MODULES
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -040035/*
36 * Tracepoint module list mutex protects the local module list.
37 */
38static DEFINE_MUTEX(tracepoint_module_list_mutex);
39
40/* Local list of struct tp_module */
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -040041static LIST_HEAD(tracepoint_module_list);
42#endif /* CONFIG_MODULES */
43
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040044/*
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -040045 * tracepoints_mutex protects the builtin and module tracepoints.
46 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040047 */
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -040048static DEFINE_MUTEX(tracepoints_mutex);
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040049
Steven Rostedt (VMware)f8a79d52018-08-10 12:17:50 -040050static struct rcu_head *early_probes;
51static bool ok_to_free_tracepoints;
52
/*
 * Note about RCU :
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;		/* Chained for deferred freeing. */
	struct tracepoint_func probes[];	/* Flexible array of callbacks, NULL-func terminated. */
};
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040062
/*
 * Stub installed in place of a probe whose removal could not allocate a
 * replacement tp_funcs array; keeps the array walkable (see func_remove()).
 */
static void tp_stub_func(void)
{
}
68
Lai Jiangshan19dba332008-10-28 10:51:49 +080069static inline void *allocate_probes(int count)
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040070{
Gustavo A. R. Silvaf0553dc2019-06-10 16:22:19 -050071 struct tp_probes *p = kmalloc(struct_size(p, probes, count),
72 GFP_KERNEL);
Lai Jiangshan19dba332008-10-28 10:51:49 +080073 return p == NULL ? NULL : p->probes;
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040074}
75
Joel Fernandes (Google)e6753f22018-07-30 15:24:22 -070076static void srcu_free_old_probes(struct rcu_head *head)
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -040077{
Mathieu Desnoyers0dea6d52014-03-21 01:19:01 -040078 kfree(container_of(head, struct tp_probes, rcu));
Lai Jiangshan19dba332008-10-28 10:51:49 +080079}
80
/*
 * Sched-RCU callback: chain into SRCU so both the sched RCU and the SRCU
 * grace periods elapse before srcu_free_old_probes() frees the array.
 */
static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}
85
Steven Rostedt (VMware)f8a79d52018-08-10 12:17:50 -040086static __init int release_early_probes(void)
87{
88 struct rcu_head *tmp;
89
90 ok_to_free_tracepoints = true;
91
92 while (early_probes) {
93 tmp = early_probes;
94 early_probes = tmp->next;
Paul E. McKenney74401722018-11-06 18:44:52 -080095 call_rcu(tmp, rcu_free_old_probes);
Steven Rostedt (VMware)f8a79d52018-08-10 12:17:50 -040096 }
97
98 return 0;
99}
100
101/* SRCU is initialized at core_initcall */
102postcore_initcall(release_early_probes);
103
Steven Rostedt38516ab2010-04-20 17:04:50 -0400104static inline void release_probes(struct tracepoint_func *old)
Lai Jiangshan19dba332008-10-28 10:51:49 +0800105{
106 if (old) {
107 struct tp_probes *tp_probes = container_of(old,
108 struct tp_probes, probes[0]);
Steven Rostedt (VMware)f8a79d52018-08-10 12:17:50 -0400109
110 /*
111 * We can't free probes if SRCU is not initialized yet.
112 * Postpone the freeing till after SRCU is initialized.
113 */
114 if (unlikely(!ok_to_free_tracepoints)) {
115 tp_probes->rcu.next = early_probes;
116 early_probes = &tp_probes->rcu;
117 return;
118 }
119
Joel Fernandes (Google)e6753f22018-07-30 15:24:22 -0700120 /*
121 * Tracepoint probes are protected by both sched RCU and SRCU,
122 * by calling the SRCU callback in the sched RCU callback we
123 * cover both cases. So let us chain the SRCU and sched RCU
124 * callbacks to wait for both grace periods.
125 */
Paul E. McKenney74401722018-11-06 18:44:52 -0800126 call_rcu(&tp_probes->rcu, rcu_free_old_probes);
Lai Jiangshan19dba332008-10-28 10:51:49 +0800127 }
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400128}
129
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400130static void debug_print_probes(struct tracepoint_func *funcs)
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400131{
132 int i;
133
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400134 if (!tracepoint_debug || !funcs)
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400135 return;
136
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400137 for (i = 0; funcs[i].func; i++)
138 printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400139}
140
/*
 * Build a new probe array containing the old entries plus @tp_func,
 * inserted by priority (before the first entry of strictly lower @prio,
 * i.e. after existing entries of equal priority).  Stub entries left by
 * failed removals are dropped.  On success *funcs points to the new
 * array and the old array is returned for the caller to release;
 * returns ERR_PTR(-EEXIST) on duplicate, -EINVAL/-ENOMEM otherwise.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		/* Second pass: copy live entries, splicing the new one in. */
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;	/* NULL func terminates the array. */
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
193
/*
 * Build a new probe array with every entry matching @tp_func (and any
 * leftover stub entries) removed.  Returns the old array for the caller
 * to release, or ERR_PTR(-ENOENT) if there was none.  If the array
 * empties, *funcs becomes NULL.  If allocating the smaller array fails,
 * the matching entries are overwritten in place with tp_stub_func and
 * the old array is kept (*funcs == returned old array signals this).
 */
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			/* Copy everything that is neither a match nor a stub. */
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}
255
Mathieu Desnoyers231264d2021-08-05 09:27:16 -0400256/*
257 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
258 */
259static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
260{
261 if (!tp_funcs)
262 return TP_FUNC_0;
263 if (!tp_funcs[1].func)
264 return TP_FUNC_1;
265 if (!tp_funcs[2].func)
266 return TP_FUNC_2;
267 return TP_FUNC_N; /* 3 or more */
268}
269
/*
 * Point the tracepoint's static call either directly at the single probe
 * (TP_FUNC_1) or at the generic iterator that walks tp->funcs.
 */
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	/* With exactly one callback, bypass the iterator and call it directly. */
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
281
/*
 * Add the probe function to a tracepoint.
 * Called with tracepoints_mutex held (see the lockdep assertion below).
 * Returns 0 on success, negative errno on failure.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	/* Run the registration hook only on the disabled->enabled transition. */
	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		/* -EEXIST is only a WARN when the caller asked for it. */
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it. This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_key_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		break;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(old);
	return 0;
}
341
/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This is
 * ensured by preempt_disable around the call site.
 * Called with tracepoints_mutex held; returns 0 on success.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new func never uses old data after a 1->0->1
		 * transition sequence.
		 * Considering that transition 0->1 is the common case
		 * and don't have rcu-sync, issue rcu-sync after
		 * transition 1->0 to break that sequence by waiting for
		 * readers to be quiescent.
		 */
		tracepoint_synchronize_unregister();
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * On 2->1 transition, RCU sync is needed before setting
		 * static call to first callback, because the observer
		 * may have loaded any prior tp->funcs after the last one
		 * associated with an rcu-sync.
		 */
		tracepoint_synchronize_unregister();
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(old);
	return 0;
}
408
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400409/**
Steven Rostedt (VMware)9913d572021-06-29 09:40:10 -0400410 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
411 * @tp: tracepoint
412 * @probe: probe handler
413 * @data: tracepoint data
414 * @prio: priority of this function over other registered functions
415 *
416 * Same as tracepoint_probe_register_prio() except that it will not warn
417 * if the tracepoint is already registered.
418 */
419int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
420 void *data, int prio)
421{
422 struct tracepoint_func tp_func;
423 int ret;
424
425 mutex_lock(&tracepoints_mutex);
426 tp_func.func = probe;
427 tp_func.data = data;
428 tp_func.prio = prio;
429 ret = tracepoint_add_func(tp, &tp_func, prio, false);
430 mutex_unlock(&tracepoints_mutex);
431 return ret;
432}
433EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
434
435/**
Lee, Chun-Yif39e2392017-06-16 16:26:43 +0800436 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400437 * @tp: tracepoint
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400438 * @probe: probe handler
Fabian Frederickcac92ba2014-06-04 16:11:23 -0700439 * @data: tracepoint data
Steven Rostedt (Red Hat)7904b5c2015-09-22 17:13:19 -0400440 * @prio: priority of this function over other registered functions
441 *
442 * Returns 0 if ok, error value on error.
443 * Note: if @tp is within a module, the caller is responsible for
444 * unregistering the probe before the module is gone. This can be
445 * performed either with a tracepoint module going notifier, or from
446 * within module exit functions.
447 */
448int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
449 void *data, int prio)
450{
451 struct tracepoint_func tp_func;
452 int ret;
453
454 mutex_lock(&tracepoints_mutex);
455 tp_func.func = probe;
456 tp_func.data = data;
457 tp_func.prio = prio;
Steven Rostedt (VMware)9913d572021-06-29 09:40:10 -0400458 ret = tracepoint_add_func(tp, &tp_func, prio, true);
Steven Rostedt (Red Hat)7904b5c2015-09-22 17:13:19 -0400459 mutex_unlock(&tracepoints_mutex);
460 return ret;
461}
462EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
463
/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 *
 * Convenience wrapper registering at TRACEPOINT_DEFAULT_PRIO.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
481
482/**
483 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400484 * @tp: tracepoint
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400485 * @probe: probe function pointer
Fabian Frederickcac92ba2014-06-04 16:11:23 -0700486 * @data: tracepoint data
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400487 *
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400488 * Returns 0 if ok, error value on error.
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400489 */
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400490int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400491{
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400492 struct tracepoint_func tp_func;
493 int ret;
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400494
495 mutex_lock(&tracepoints_mutex);
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400496 tp_func.func = probe;
497 tp_func.data = data;
498 ret = tracepoint_remove_func(tp, &tp_func);
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400499 mutex_unlock(&tracepoints_mutex);
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400500 return ret;
Mathieu Desnoyers97e1c182008-07-18 12:16:16 -0400501}
502EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
503
Mathieu Desnoyers9c0be3f2018-10-13 15:10:50 -0400504static void for_each_tracepoint_range(
505 tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
Ard Biesheuvel46e0c9b2018-08-21 21:56:22 -0700506 void (*fct)(struct tracepoint *tp, void *priv),
507 void *priv)
508{
Mathieu Desnoyers9c0be3f2018-10-13 15:10:50 -0400509 tracepoint_ptr_t *iter;
510
Ard Biesheuvel46e0c9b2018-08-21 21:56:22 -0700511 if (!begin)
512 return;
Mathieu Desnoyers9c0be3f2018-10-13 15:10:50 -0400513 for (iter = begin; iter < end; iter++)
514 fct(tracepoint_ptr_deref(iter), priv);
Ard Biesheuvel46e0c9b2018-08-21 21:56:22 -0700515}
516
Ingo Molnar227a8372008-11-16 09:50:34 +0100517#ifdef CONFIG_MODULES
Steven Rostedt (Red Hat)45ab2812014-02-26 13:37:38 -0500518bool trace_module_has_bad_taint(struct module *mod)
519{
Mathieu Desnoyers66cc69e2014-03-13 12:11:30 +1030520 return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
521 (1 << TAINT_UNSIGNED_MODULE));
Steven Rostedt (Red Hat)45ab2812014-02-26 13:37:38 -0500522}
523
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400524static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
525
526/**
527 * register_tracepoint_notifier - register tracepoint coming/going notifier
528 * @nb: notifier block
529 *
530 * Notifiers registered with this function are called on module
531 * coming/going with the tracepoint_module_list_mutex held.
532 * The notifier block callback should expect a "struct tp_module" data
533 * pointer.
534 */
535int register_tracepoint_module_notifier(struct notifier_block *nb)
536{
537 struct tp_module *tp_mod;
538 int ret;
539
540 mutex_lock(&tracepoint_module_list_mutex);
541 ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
542 if (ret)
543 goto end;
544 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
545 (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
546end:
547 mutex_unlock(&tracepoint_module_list_mutex);
548 return ret;
549}
550EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
551
552/**
553 * unregister_tracepoint_notifier - unregister tracepoint coming/going notifier
554 * @nb: notifier block
555 *
556 * The notifier block callback should expect a "struct tp_module" data
557 * pointer.
558 */
559int unregister_tracepoint_module_notifier(struct notifier_block *nb)
560{
561 struct tp_module *tp_mod;
562 int ret;
563
564 mutex_lock(&tracepoint_module_list_mutex);
565 ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
566 if (ret)
567 goto end;
568 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
569 (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
570end:
571 mutex_unlock(&tracepoint_module_list_mutex);
572 return ret;
573
574}
575EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
576
/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);	/* Probes still registered at module unload. */
}
585
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400586static int tracepoint_module_coming(struct module *mod)
587{
Mathieu Desnoyers0dea6d52014-03-21 01:19:01 -0400588 struct tp_module *tp_mod;
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400589 int ret = 0;
590
Steven Rostedt (Red Hat)7dec9352014-02-26 10:54:36 -0500591 if (!mod->num_tracepoints)
592 return 0;
593
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400594 /*
Steven Rostedtc10076c2012-01-13 21:40:59 -0500595 * We skip modules that taint the kernel, especially those with different
596 * module headers (for forced load), to make sure we don't cause a crash.
Mathieu Desnoyers66cc69e2014-03-13 12:11:30 +1030597 * Staging, out-of-tree, and unsigned GPL modules are fine.
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400598 */
Steven Rostedt (Red Hat)45ab2812014-02-26 13:37:38 -0500599 if (trace_module_has_bad_taint(mod))
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400600 return 0;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400601 mutex_lock(&tracepoint_module_list_mutex);
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400602 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
603 if (!tp_mod) {
604 ret = -ENOMEM;
605 goto end;
606 }
Steven Rostedt (Red Hat)eb7d0352014-04-08 20:09:40 -0400607 tp_mod->mod = mod;
Mathieu Desnoyers0dea6d52014-03-21 01:19:01 -0400608 list_add_tail(&tp_mod->list, &tracepoint_module_list);
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400609 blocking_notifier_call_chain(&tracepoint_notify_list,
610 MODULE_STATE_COMING, tp_mod);
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400611end:
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400612 mutex_unlock(&tracepoint_module_list_mutex);
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400613 return ret;
614}
615
/*
 * Drop @mod from the tracepoint module list, fire the GOING notifier,
 * then verify all of the module's tracepoints are quiescent (no probes
 * left registered).
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}
Ingo Molnar227a8372008-11-16 09:50:34 +0100648
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400649static int tracepoint_module_notify(struct notifier_block *self,
650 unsigned long val, void *data)
Mathieu Desnoyers32f85742008-11-14 17:47:46 -0500651{
652 struct module *mod = data;
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400653 int ret = 0;
Mathieu Desnoyers32f85742008-11-14 17:47:46 -0500654
655 switch (val) {
656 case MODULE_STATE_COMING:
Mathieu Desnoyersb75ef8b2011-08-10 15:18:39 -0400657 ret = tracepoint_module_coming(mod);
658 break;
659 case MODULE_STATE_LIVE:
660 break;
Mathieu Desnoyers32f85742008-11-14 17:47:46 -0500661 case MODULE_STATE_GOING:
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400662 tracepoint_module_going(mod);
663 break;
664 case MODULE_STATE_UNFORMED:
Mathieu Desnoyers32f85742008-11-14 17:47:46 -0500665 break;
666 }
Peter Zijlstra0340a6b2020-08-18 15:57:37 +0200667 return notifier_from_errno(ret);
Mathieu Desnoyers32f85742008-11-14 17:47:46 -0500668}
669
/* Notifier block hooked into the module loader; default (0) priority. */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};
674
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400675static __init int init_tracepoints(void)
Mathieu Desnoyers32f85742008-11-14 17:47:46 -0500676{
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400677 int ret;
678
679 ret = register_module_notifier(&tracepoint_module_nb);
Steven Rostedt (Red Hat)eb7d0352014-04-08 20:09:40 -0400680 if (ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -0700681 pr_warn("Failed to register tracepoint module enter notifier\n");
Steven Rostedt (Red Hat)eb7d0352014-04-08 20:09:40 -0400682
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400683 return ret;
Mathieu Desnoyers32f85742008-11-14 17:47:46 -0500684}
685__initcall(init_tracepoints);
Ingo Molnar227a8372008-11-16 09:50:34 +0100686#endif /* CONFIG_MODULES */
Jason Barona871bd32009-08-10 16:52:31 -0400687
/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 *
 * Walks the core kernel's __tracepoints_ptrs section (modules excluded).
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
700
Josh Stone3d27d8cb2009-08-24 14:43:12 -0700701#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
Ingo Molnar60d970c2009-08-13 23:37:26 +0200702
Josh Stone97419872009-08-24 14:43:13 -0700703/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
Jason Barona871bd32009-08-10 16:52:31 -0400704static int sys_tracepoint_refcount;
705
/*
 * Enable the per-task syscall tracepoint work flag on every thread the
 * first time a syscall tracepoint is registered; later registrations only
 * bump the refcount.  Per the comment above, reg/unreg run guarded by
 * tracepoints_mutex, which serializes the refcount.
 */
int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}
721
/*
 * Clear the per-task syscall tracepoint work flag on every thread when the
 * last syscall tracepoint is unregistered.  Serialized by tracepoints_mutex
 * (see the reg/unreg comment above).
 */
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
Ingo Molnar60d970c2009-08-13 23:37:26 +0200735#endif