// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched a non-NULL prog_array, we go
	 * into trace_call_bpf() and do the actual, proper rcu_dereference()
	 * under the RCU lock. If it turns out that prog_array is NULL, we
	 * bail out. Conversely, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, we skip the prog_array and risk missing events that were
	 * attached in between that check and the rcu_dereference(), which is
	 * an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY(call->prog_array, ctx, bpf_prog_run);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
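
/*
 * Illustrative sketch, not part of this file: from the BPF program side,
 * bpf_probe_read_user_str() is the usual way to pull a NUL-terminated
 * string (e.g. a pathname passed to a syscall) out of a user pointer seen
 * in a kprobe or tracepoint argument. The names below are hypothetical:
 *
 *	char path[256];
 *	long n = bpf_probe_read_user_str(path, sizeof(path), filename_ptr);
 *
 *	if (n > 0)
 *		... use 'path'; n counts the copied bytes including the NUL ...
 *
 * Because the tail beyond the NUL is never copied (see the comment in
 * bpf_probe_read_user_str_common() above), 'path' is safe to use as a
 * fixed-size hash map key.
 */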

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024
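
/*
 * bpf_trace_printk() and bpf_trace_vprintk() below each format into a
 * static buffer of BPF_TRACE_PRINTK_SIZE bytes; trace_printk_lock
 * serializes access to those buffers, so overlong output is truncated by
 * bstr_printf() rather than corrupted by concurrent users.
 */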

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	u32 *bin_args;
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
				  MAX_TRACE_PRINTK_VARARGS);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
};
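
/*
 * Illustrative sketch, not part of this file: BPF programs usually reach
 * this helper through libbpf's bpf_printk() convenience macro, which
 * builds the format string and fmt_size arguments for it, e.g.:
 *
 *	bpf_printk("open by pid %d", pid);
 *
 * The formatted text is emitted through the bpf_trace/bpf_trace_printk
 * tracepoint that __set_printk_clr_event() below enables, so it typically
 * shows up in the tracefs trace_pipe once a program using the helper is
 * loaded.
 */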

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events.  By loading a program
	 * calling bpf_trace_printk() however the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
	   u32, data_len)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func = bpf_trace_vprintk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, bin_args);

	bpf_bprintf_cleanup();

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func = bpf_seq_printf_btf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
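
/*
 * Illustrative sketch, not part of this file: on the BPF program side this
 * helper is normally paired with a BPF_MAP_TYPE_PERF_EVENT_ARRAY map and
 * the BPF_F_CURRENT_CPU flag, so the record goes to the perf buffer of the
 * CPU the program runs on. 'events' and 'struct event' are hypothetical:
 *
 *	struct event ev = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts  = bpf_ktime_get_ns(),
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &ev, sizeof(ev));
 */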

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func = bpf_get_current_task_btf,
	.gpl_only = true,
	.ret_type = RET_PTR_TO_BTF_ID,
	.ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func = bpf_task_pt_regs,
	.gpl_only = true,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type = RET_PTR_TO_BTF_ID,
	.ret_btf_id = &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func = bpf_d_path,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &bpf_d_path_btf_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.allowed = bpf_d_path_allowed,
};

#define BTF_F_ALL	(BTF_F_COMPACT | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func = bpf_snprintf_btf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE,
	.arg5_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func = bpf_get_func_ip_tracing,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	return kp ? (uintptr_t)kp->addr : 0;
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func = bpf_get_func_ip_kprobe,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func = bpf_get_attach_cookie_trace,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func = bpf_get_attach_cookie_pe,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};
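
/*
 * The "attach cookie" returned by the two helpers above is an opaque u64
 * that user space supplied at attach time (e.g. via the bpf_cookie field
 * of the link-create/perf-event attach interfaces). The kernel only stores
 * and echoes it back; it attaches no meaning to the value.
 */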

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func = bpf_get_branch_snapshot,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func = get_func_arg,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func = get_func_ret,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func = get_func_arg_cnt,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};
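
/*
 * The three helpers above (and bpf_get_func_ip_tracing()) rely on the ctx
 * layout the BPF trampoline sets up for tracing programs: ctx[0..nr_args-1]
 * hold the traced function's arguments, ctx[nr_args] holds its return value
 * (valid for fexit), ctx[-1] holds nr_args and ctx[-2] holds the traced
 * function's IP.
 */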
1140
Andrii Nakryiko7adfc6c2021-08-15 00:05:59 -07001141static const struct bpf_func_proto *
KP Singhfc611f42020-03-29 01:43:49 +01001142bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
Alexei Starovoitov25415172015-03-25 12:49:20 -07001143{
1144 switch (func_id) {
1145 case BPF_FUNC_map_lookup_elem:
1146 return &bpf_map_lookup_elem_proto;
1147 case BPF_FUNC_map_update_elem:
1148 return &bpf_map_update_elem_proto;
1149 case BPF_FUNC_map_delete_elem:
1150 return &bpf_map_delete_elem_proto;
Alban Crequy02a8c812019-04-14 18:58:46 +02001151 case BPF_FUNC_map_push_elem:
1152 return &bpf_map_push_elem_proto;
1153 case BPF_FUNC_map_pop_elem:
1154 return &bpf_map_pop_elem_proto;
1155 case BPF_FUNC_map_peek_elem:
1156 return &bpf_map_peek_elem_proto;
Alexei Starovoitovd9847d32015-03-25 12:49:21 -07001157 case BPF_FUNC_ktime_get_ns:
1158 return &bpf_ktime_get_ns_proto;
Maciej Żenczykowski71d19212020-04-26 09:15:25 -07001159 case BPF_FUNC_ktime_get_boot_ns:
1160 return &bpf_ktime_get_boot_ns_proto;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001161 case BPF_FUNC_tail_call:
1162 return &bpf_tail_call_proto;
Alexei Starovoitovffeedaf2015-06-12 19:39:12 -07001163 case BPF_FUNC_get_current_pid_tgid:
1164 return &bpf_get_current_pid_tgid_proto;
Alexei Starovoitov606274c2016-07-06 22:38:36 -07001165 case BPF_FUNC_get_current_task:
1166 return &bpf_get_current_task_proto;
KP Singh3ca10322020-11-06 10:37:43 +00001167 case BPF_FUNC_get_current_task_btf:
1168 return &bpf_get_current_task_btf_proto;
Daniel Xudd6e10f2021-08-23 19:43:49 -07001169 case BPF_FUNC_task_pt_regs:
1170 return &bpf_task_pt_regs_proto;
Alexei Starovoitovffeedaf2015-06-12 19:39:12 -07001171 case BPF_FUNC_get_current_uid_gid:
1172 return &bpf_get_current_uid_gid_proto;
1173 case BPF_FUNC_get_current_comm:
1174 return &bpf_get_current_comm_proto;
Alexei Starovoitov9c959c82015-03-25 12:49:22 -07001175 case BPF_FUNC_trace_printk:
Alexei Starovoitov0756ea32015-06-12 19:39:13 -07001176 return bpf_get_trace_printk_proto();
Alexei Starovoitovab1973d2015-06-12 19:39:14 -07001177 case BPF_FUNC_get_smp_processor_id:
1178 return &bpf_get_smp_processor_id_proto;
Daniel Borkmann2d0e30c2016-10-21 12:46:33 +02001179 case BPF_FUNC_get_numa_node_id:
1180 return &bpf_get_numa_node_id_proto;
Kaixu Xia35578d72015-08-06 07:02:35 +00001181 case BPF_FUNC_perf_event_read:
1182 return &bpf_perf_event_read_proto;
Sargun Dhillon60d20f92016-08-12 08:56:52 -07001183 case BPF_FUNC_current_task_under_cgroup:
1184 return &bpf_current_task_under_cgroup_proto;
Alexei Starovoitov8937bd82016-08-11 18:17:18 -07001185 case BPF_FUNC_get_prandom_u32:
1186 return &bpf_get_prandom_u32_proto;
Daniel Borkmann51e1bb92021-08-09 12:43:17 +02001187 case BPF_FUNC_probe_write_user:
1188 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1189 NULL : bpf_get_probe_write_proto();
Daniel Borkmann6ae08ae2019-11-02 00:17:59 +01001190 case BPF_FUNC_probe_read_user:
1191 return &bpf_probe_read_user_proto;
1192 case BPF_FUNC_probe_read_kernel:
Daniel Borkmann71330842021-08-09 21:45:32 +02001193 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
Daniel Borkmannff40e512021-05-28 09:16:31 +00001194 NULL : &bpf_probe_read_kernel_proto;
Daniel Borkmann6ae08ae2019-11-02 00:17:59 +01001195 case BPF_FUNC_probe_read_user_str:
1196 return &bpf_probe_read_user_str_proto;
1197 case BPF_FUNC_probe_read_kernel_str:
Daniel Borkmann71330842021-08-09 21:45:32 +02001198 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
Daniel Borkmannff40e512021-05-28 09:16:31 +00001199 NULL : &bpf_probe_read_kernel_str_proto;
Daniel Borkmann0ebeea82020-05-15 12:11:16 +02001200#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1201 case BPF_FUNC_probe_read:
Daniel Borkmann71330842021-08-09 21:45:32 +02001202 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
Daniel Borkmannff40e512021-05-28 09:16:31 +00001203 NULL : &bpf_probe_read_compat_proto;
Gianluca Borelloa5e8c072017-01-18 17:55:49 +00001204 case BPF_FUNC_probe_read_str:
Daniel Borkmann71330842021-08-09 21:45:32 +02001205 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
Daniel Borkmannff40e512021-05-28 09:16:31 +00001206 NULL : &bpf_probe_read_compat_str_proto;
Daniel Borkmann0ebeea82020-05-15 12:11:16 +02001207#endif
Yonghong Song34ea38c2018-06-04 08:53:41 -07001208#ifdef CONFIG_CGROUPS
Yonghong Songbf6fa2c82018-06-03 15:59:41 -07001209 case BPF_FUNC_get_current_cgroup_id:
1210 return &bpf_get_current_cgroup_id_proto;
Namhyung Kim95b861a792021-06-27 08:36:27 -07001211 case BPF_FUNC_get_current_ancestor_cgroup_id:
1212 return &bpf_get_current_ancestor_cgroup_id_proto;
Yonghong Song34ea38c2018-06-04 08:53:41 -07001213#endif
Yonghong Song8b401f92019-05-23 14:47:45 -07001214 case BPF_FUNC_send_signal:
1215 return &bpf_send_signal_proto;
Yonghong Song84829412020-01-14 19:50:02 -08001216 case BPF_FUNC_send_signal_thread:
1217 return &bpf_send_signal_thread_proto;
Song Liub80b0332020-02-14 15:41:46 -08001218 case BPF_FUNC_perf_event_read_value:
1219 return &bpf_perf_event_read_value_proto;
Carlos Neirab4490c52020-03-04 17:41:56 -03001220 case BPF_FUNC_get_ns_current_pid_tgid:
1221 return &bpf_get_ns_current_pid_tgid_proto;
Andrii Nakryiko457f4432020-05-29 00:54:20 -07001222 case BPF_FUNC_ringbuf_output:
1223 return &bpf_ringbuf_output_proto;
1224 case BPF_FUNC_ringbuf_reserve:
1225 return &bpf_ringbuf_reserve_proto;
1226 case BPF_FUNC_ringbuf_submit:
1227 return &bpf_ringbuf_submit_proto;
1228 case BPF_FUNC_ringbuf_discard:
1229 return &bpf_ringbuf_discard_proto;
1230 case BPF_FUNC_ringbuf_query:
1231 return &bpf_ringbuf_query_proto;
Yonghong Song72e2b2b2020-06-23 16:08:08 -07001232 case BPF_FUNC_jiffies64:
1233 return &bpf_jiffies64_proto;
Song Liufa28dcb2020-06-29 23:28:44 -07001234 case BPF_FUNC_get_task_stack:
1235 return &bpf_get_task_stack_proto;
Alexei Starovoitov07be4c42020-08-27 15:01:12 -07001236 case BPF_FUNC_copy_from_user:
1237 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
Alan Maguirec4d0bfb2020-09-28 12:31:05 +01001238 case BPF_FUNC_snprintf_btf:
1239 return &bpf_snprintf_btf_proto;
Andrii Nakryikob7906b72020-12-11 22:36:25 +01001240 case BPF_FUNC_per_cpu_ptr:
Hao Luoeaa6bcb2020-09-29 16:50:47 -07001241 return &bpf_per_cpu_ptr_proto;
Andrii Nakryikob7906b72020-12-11 22:36:25 +01001242 case BPF_FUNC_this_cpu_ptr:
Hao Luo63d9b802020-09-29 16:50:48 -07001243 return &bpf_this_cpu_ptr_proto;
Song Liua10787e2021-02-25 15:43:14 -08001244 case BPF_FUNC_task_storage_get:
1245 return &bpf_task_storage_get_proto;
1246 case BPF_FUNC_task_storage_delete:
1247 return &bpf_task_storage_delete_proto;
Yonghong Song69c087b2021-02-26 12:49:25 -08001248 case BPF_FUNC_for_each_map_elem:
1249 return &bpf_for_each_map_elem_proto;
Florent Revest7b155232021-04-19 17:52:40 +02001250 case BPF_FUNC_snprintf:
1251 return &bpf_snprintf_proto;
Jiri Olsa9b99edc2021-07-14 11:43:55 +02001252 case BPF_FUNC_get_func_ip:
1253 return &bpf_get_func_ip_proto_tracing;
Song Liu856c02d2021-09-10 11:33:51 -07001254 case BPF_FUNC_get_branch_snapshot:
1255 return &bpf_get_branch_snapshot_proto;
Song Liu7c7e3d32021-11-05 16:23:29 -07001256 case BPF_FUNC_find_vma:
1257 return &bpf_find_vma_proto;
Dave Marchevsky10aceb62021-09-17 11:29:05 -07001258 case BPF_FUNC_trace_vprintk:
1259 return bpf_get_trace_vprintk_proto();
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001260 default:
Alexei Starovoitovb00628b2021-07-14 17:54:09 -07001261 return bpf_base_func_proto(func_id);
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001262 }
1263}
1264
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001265static const struct bpf_func_proto *
1266kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001267{
1268 switch (func_id) {
Alexei Starovoitova43eec32015-10-20 20:02:34 -07001269 case BPF_FUNC_perf_event_output:
1270 return &bpf_perf_event_output_proto;
Alexei Starovoitovd5a3b1f2016-02-17 19:58:58 -08001271 case BPF_FUNC_get_stackid:
1272 return &bpf_get_stackid_proto;
Yonghong Songc195651e2018-04-28 22:28:08 -07001273 case BPF_FUNC_get_stack:
1274 return &bpf_get_stack_proto;
Josef Bacik9802d862017-12-11 11:36:48 -05001275#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1276 case BPF_FUNC_override_return:
1277 return &bpf_override_return_proto;
1278#endif
Jiri Olsa9ffd9f32021-07-14 11:43:56 +02001279 case BPF_FUNC_get_func_ip:
1280 return &bpf_get_func_ip_proto_kprobe;
Andrii Nakryiko7adfc6c2021-08-15 00:05:59 -07001281 case BPF_FUNC_get_attach_cookie:
1282 return &bpf_get_attach_cookie_proto_trace;
Alexei Starovoitov25415172015-03-25 12:49:20 -07001283 default:
KP Singhfc611f42020-03-29 01:43:49 +01001284 return bpf_tracing_func_proto(func_id, prog);
Alexei Starovoitov25415172015-03-25 12:49:20 -07001285 }
1286}
1287
1288/* bpf+kprobe programs can access fields of 'struct pt_regs' */
Alexei Starovoitov19de99f2016-06-15 18:25:38 -07001289static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001290 const struct bpf_prog *prog,
Yonghong Song23994632017-06-22 15:07:39 -07001291 struct bpf_insn_access_aux *info)
Alexei Starovoitov25415172015-03-25 12:49:20 -07001292{
Alexei Starovoitov25415172015-03-25 12:49:20 -07001293 if (off < 0 || off >= sizeof(struct pt_regs))
1294 return false;
Alexei Starovoitov25415172015-03-25 12:49:20 -07001295 if (type != BPF_READ)
1296 return false;
Alexei Starovoitov25415172015-03-25 12:49:20 -07001297 if (off % size != 0)
1298 return false;
Daniel Borkmann2d071c62017-01-15 01:34:25 +01001299 /*
 1300	 * Assertion for 32-bit platforms to make sure the last 8-byte access
 1301	 * (BPF_DW) to the last 4-byte member is disallowed.
1302 */
1303 if (off + size > sizeof(struct pt_regs))
1304 return false;
1305
Alexei Starovoitov25415172015-03-25 12:49:20 -07001306 return true;
1307}
1308
Jakub Kicinski7de16e32017-10-16 16:40:53 -07001309const struct bpf_verifier_ops kprobe_verifier_ops = {
Alexei Starovoitov25415172015-03-25 12:49:20 -07001310 .get_func_proto = kprobe_prog_func_proto,
1311 .is_valid_access = kprobe_prog_is_valid_access,
1312};
1313
Jakub Kicinski7de16e32017-10-16 16:40:53 -07001314const struct bpf_prog_ops kprobe_prog_ops = {
1315};
1316
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001317BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1318 u64, flags, void *, data, u64, size)
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001319{
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001320 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1321
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001322 /*
1323 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1324 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001325 * from there and call the same bpf_perf_event_output() helper inline.
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001326 */
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001327 return ____bpf_perf_event_output(regs, map, flags, data, size);
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001328}
1329
1330static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1331 .func = bpf_perf_event_output_tp,
1332 .gpl_only = true,
1333 .ret_type = RET_INTEGER,
1334 .arg1_type = ARG_PTR_TO_CTX,
1335 .arg2_type = ARG_CONST_MAP_PTR,
1336 .arg3_type = ARG_ANYTHING,
Hao Luo216e3cd2021-12-16 16:31:51 -08001337 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
Gianluca Borelloa60dd352017-11-22 18:32:56 +00001338 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001339};
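/*
 * Conceptual layout assumed by the helper above (a sketch, not a real
 * declaration anywhere in the kernel): the buffer handed to a perf
 * tracepoint program as ctx starts with a hidden struct pt_regs
 * pointer, followed by the tracepoint record fields the program may
 * read:
 *
 *	struct tp_buffer_sketch {
 *		struct pt_regs *regs;	// hidden from the BPF program
 *		u8 data[];		// tracepoint fields, off >= sizeof(void *)
 *	};
 *
 * which is why tp_prog_is_valid_access() below rejects offsets smaller
 * than sizeof(void *).
 */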
1340
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001341BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1342 u64, flags)
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001343{
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001344 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001345
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001346 /*
1347 * Same comment as in bpf_perf_event_output_tp(), only that this time
1348 * the other helper's function body cannot be inlined due to being
 1349	 * external, thus we need to call the raw helper function.
1350 */
1351 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1352 flags, 0, 0);
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001353}
1354
1355static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1356 .func = bpf_get_stackid_tp,
1357 .gpl_only = true,
1358 .ret_type = RET_INTEGER,
1359 .arg1_type = ARG_PTR_TO_CTX,
1360 .arg2_type = ARG_CONST_MAP_PTR,
1361 .arg3_type = ARG_ANYTHING,
1362};
1363
Yonghong Songc195651e2018-04-28 22:28:08 -07001364BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1365 u64, flags)
1366{
1367 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1368
1369 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1370 (unsigned long) size, flags, 0);
1371}
1372
1373static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1374 .func = bpf_get_stack_tp,
1375 .gpl_only = true,
1376 .ret_type = RET_INTEGER,
1377 .arg1_type = ARG_PTR_TO_CTX,
1378 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1379 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1380 .arg4_type = ARG_ANYTHING,
1381};
1382
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001383static const struct bpf_func_proto *
1384tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001385{
1386 switch (func_id) {
1387 case BPF_FUNC_perf_event_output:
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001388 return &bpf_perf_event_output_proto_tp;
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001389 case BPF_FUNC_get_stackid:
Alexei Starovoitov9940d672016-04-06 18:43:27 -07001390 return &bpf_get_stackid_proto_tp;
Yonghong Songc195651e2018-04-28 22:28:08 -07001391 case BPF_FUNC_get_stack:
1392 return &bpf_get_stack_proto_tp;
Andrii Nakryiko7adfc6c2021-08-15 00:05:59 -07001393 case BPF_FUNC_get_attach_cookie:
1394 return &bpf_get_attach_cookie_proto_trace;
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001395 default:
KP Singhfc611f42020-03-29 01:43:49 +01001396 return bpf_tracing_func_proto(func_id, prog);
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001397 }
1398}
1399
Alexei Starovoitov19de99f2016-06-15 18:25:38 -07001400static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001401 const struct bpf_prog *prog,
Yonghong Song23994632017-06-22 15:07:39 -07001402 struct bpf_insn_access_aux *info)
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001403{
1404 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1405 return false;
1406 if (type != BPF_READ)
1407 return false;
1408 if (off % size != 0)
1409 return false;
Daniel Borkmann2d071c62017-01-15 01:34:25 +01001410
1411 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001412 return true;
1413}
1414
Jakub Kicinski7de16e32017-10-16 16:40:53 -07001415const struct bpf_verifier_ops tracepoint_verifier_ops = {
Alexei Starovoitov9fd82b612016-04-06 18:43:26 -07001416 .get_func_proto = tp_prog_func_proto,
1417 .is_valid_access = tp_prog_is_valid_access,
1418};
1419
Jakub Kicinski7de16e32017-10-16 16:40:53 -07001420const struct bpf_prog_ops tracepoint_prog_ops = {
1421};
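/*
 * Usage sketch: a tracepoint program whose helpers are resolved by
 * tp_prog_func_proto() above; the tracepoint name is an illustrative
 * assumption:
 *
 *	SEC("tracepoint/sched/sched_switch")
 *	int on_switch(void *ctx)
 *	{
 *		u64 stack[8];
 *		long len;
 *
 *		len = bpf_get_stack(ctx, stack, sizeof(stack), 0);
 *		return len < 0 ? 1 : 0;
 *	}
 */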
1422
Yonghong Songf005afe2018-03-20 11:19:17 -07001423BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1424 struct bpf_perf_event_value *, buf, u32, size)
1425{
1426 int err = -EINVAL;
1427
1428 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1429 goto clear;
1430 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1431 &buf->running);
1432 if (unlikely(err))
1433 goto clear;
1434 return 0;
1435clear:
1436 memset(buf, 0, size);
1437 return err;
1438}
1439
1440static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1441 .func = bpf_perf_prog_read_value,
1442 .gpl_only = true,
1443 .ret_type = RET_INTEGER,
1444 .arg1_type = ARG_PTR_TO_CTX,
1445 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1446 .arg3_type = ARG_CONST_SIZE,
1447};
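/*
 * Usage sketch for bpf_perf_prog_read_value() from a perf_event
 * program (field names follow struct bpf_perf_event_value; the
 * bpf_printk() call is purely illustrative):
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		struct bpf_perf_event_value v;
 *
 *		if (!bpf_perf_prog_read_value(ctx, &v, sizeof(v)))
 *			bpf_printk("counter=%llu", v.counter);
 *		return 0;
 *	}
 */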
1448
Daniel Xufff7b642020-02-17 19:04:31 -08001449BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1450 void *, buf, u32, size, u64, flags)
1451{
Daniel Xufff7b642020-02-17 19:04:31 -08001452 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1453 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1454 u32 to_copy;
1455
1456 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1457 return -EINVAL;
1458
1459 if (unlikely(!br_stack))
Kajol Jaindb52f572021-12-06 13:03:15 +05301460 return -ENOENT;
Daniel Xufff7b642020-02-17 19:04:31 -08001461
1462 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1463 return br_stack->nr * br_entry_size;
1464
1465 if (!buf || (size % br_entry_size != 0))
1466 return -EINVAL;
1467
1468 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1469 memcpy(buf, br_stack->entries, to_copy);
1470
1471 return to_copy;
Daniel Xufff7b642020-02-17 19:04:31 -08001472}
1473
1474static const struct bpf_func_proto bpf_read_branch_records_proto = {
1475 .func = bpf_read_branch_records,
1476 .gpl_only = true,
1477 .ret_type = RET_INTEGER,
1478 .arg1_type = ARG_PTR_TO_CTX,
1479 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1480 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1481 .arg4_type = ARG_ANYTHING,
1482};
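/*
 * Usage sketch for the two call patterns the helper above supports,
 * from a perf_event program sampling with branch stacks (the 16-entry
 * buffer is an illustrative choice): pass BPF_F_GET_BRANCH_RECORDS_SIZE
 * to query the size, or pass a buffer sized in multiples of
 * sizeof(struct perf_branch_entry) to copy the records:
 *
 *	struct perf_branch_entry entries[16];
 *	int total, copied;
 *
 *	total  = bpf_read_branch_records(ctx, NULL, 0,
 *					 BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 */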
1483
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001484static const struct bpf_func_proto *
1485pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
Yonghong Songf005afe2018-03-20 11:19:17 -07001486{
1487 switch (func_id) {
1488 case BPF_FUNC_perf_event_output:
1489 return &bpf_perf_event_output_proto_tp;
1490 case BPF_FUNC_get_stackid:
Song Liu7b04d6d2020-07-23 11:06:44 -07001491 return &bpf_get_stackid_proto_pe;
Yonghong Songc195651e2018-04-28 22:28:08 -07001492 case BPF_FUNC_get_stack:
Song Liu7b04d6d2020-07-23 11:06:44 -07001493 return &bpf_get_stack_proto_pe;
Yonghong Songf005afe2018-03-20 11:19:17 -07001494 case BPF_FUNC_perf_prog_read_value:
1495 return &bpf_perf_prog_read_value_proto;
Daniel Xufff7b642020-02-17 19:04:31 -08001496 case BPF_FUNC_read_branch_records:
1497 return &bpf_read_branch_records_proto;
Andrii Nakryiko7adfc6c2021-08-15 00:05:59 -07001498 case BPF_FUNC_get_attach_cookie:
1499 return &bpf_get_attach_cookie_proto_pe;
Yonghong Songf005afe2018-03-20 11:19:17 -07001500 default:
KP Singhfc611f42020-03-29 01:43:49 +01001501 return bpf_tracing_func_proto(func_id, prog);
Yonghong Songf005afe2018-03-20 11:19:17 -07001502 }
1503}
1504
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001505/*
1506 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 1507	 * to avoid a potential recursive reuse issue when/if tracepoints are added
Matt Mullins9594dc32019-06-11 14:53:04 -07001508 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1509 *
1510 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 1511	 * in normal, irq, and nmi contexts.
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001512 */
Matt Mullins9594dc32019-06-11 14:53:04 -07001513struct bpf_raw_tp_regs {
1514 struct pt_regs regs[3];
1515};
1516static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1517static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1518static struct pt_regs *get_bpf_raw_tp_regs(void)
1519{
1520 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1521 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1522
1523 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1524 this_cpu_dec(bpf_raw_tp_nest_level);
1525 return ERR_PTR(-EBUSY);
1526 }
1527
1528 return &tp_regs->regs[nest_level - 1];
1529}
1530
1531static void put_bpf_raw_tp_regs(void)
1532{
1533 this_cpu_dec(bpf_raw_tp_nest_level);
1534}
1535
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001536BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1537 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1538{
Matt Mullins9594dc32019-06-11 14:53:04 -07001539 struct pt_regs *regs = get_bpf_raw_tp_regs();
1540 int ret;
1541
1542 if (IS_ERR(regs))
1543 return PTR_ERR(regs);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001544
1545 perf_fetch_caller_regs(regs);
Matt Mullins9594dc32019-06-11 14:53:04 -07001546 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1547
1548 put_bpf_raw_tp_regs();
1549 return ret;
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001550}
1551
1552static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1553 .func = bpf_perf_event_output_raw_tp,
1554 .gpl_only = true,
1555 .ret_type = RET_INTEGER,
1556 .arg1_type = ARG_PTR_TO_CTX,
1557 .arg2_type = ARG_CONST_MAP_PTR,
1558 .arg3_type = ARG_ANYTHING,
Hao Luo216e3cd2021-12-16 16:31:51 -08001559 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001560 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1561};
1562
Alexei Starovoitova7658e12019-10-15 20:25:04 -07001563extern const struct bpf_func_proto bpf_skb_output_proto;
Eelco Chaudrond831ee82020-03-06 08:59:23 +00001564extern const struct bpf_func_proto bpf_xdp_output_proto;
Alexei Starovoitova7658e12019-10-15 20:25:04 -07001565
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001566BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1567 struct bpf_map *, map, u64, flags)
1568{
Matt Mullins9594dc32019-06-11 14:53:04 -07001569 struct pt_regs *regs = get_bpf_raw_tp_regs();
1570 int ret;
1571
1572 if (IS_ERR(regs))
1573 return PTR_ERR(regs);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001574
1575 perf_fetch_caller_regs(regs);
 1576	/* similar to bpf_perf_event_output_tp, but pt_regs is fetched differently */
Matt Mullins9594dc32019-06-11 14:53:04 -07001577 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1578 flags, 0, 0);
1579 put_bpf_raw_tp_regs();
1580 return ret;
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001581}
1582
1583static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1584 .func = bpf_get_stackid_raw_tp,
1585 .gpl_only = true,
1586 .ret_type = RET_INTEGER,
1587 .arg1_type = ARG_PTR_TO_CTX,
1588 .arg2_type = ARG_CONST_MAP_PTR,
1589 .arg3_type = ARG_ANYTHING,
1590};
1591
Yonghong Songc195651e2018-04-28 22:28:08 -07001592BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1593 void *, buf, u32, size, u64, flags)
1594{
Matt Mullins9594dc32019-06-11 14:53:04 -07001595 struct pt_regs *regs = get_bpf_raw_tp_regs();
1596 int ret;
1597
1598 if (IS_ERR(regs))
1599 return PTR_ERR(regs);
Yonghong Songc195651e2018-04-28 22:28:08 -07001600
1601 perf_fetch_caller_regs(regs);
Matt Mullins9594dc32019-06-11 14:53:04 -07001602 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1603 (unsigned long) size, flags, 0);
1604 put_bpf_raw_tp_regs();
1605 return ret;
Yonghong Songc195651e2018-04-28 22:28:08 -07001606}
1607
1608static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1609 .func = bpf_get_stack_raw_tp,
1610 .gpl_only = true,
1611 .ret_type = RET_INTEGER,
1612 .arg1_type = ARG_PTR_TO_CTX,
Hao Luo216e3cd2021-12-16 16:31:51 -08001613 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
Yonghong Songc195651e2018-04-28 22:28:08 -07001614 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1615 .arg4_type = ARG_ANYTHING,
1616};
1617
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001618static const struct bpf_func_proto *
1619raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001620{
1621 switch (func_id) {
1622 case BPF_FUNC_perf_event_output:
1623 return &bpf_perf_event_output_proto_raw_tp;
1624 case BPF_FUNC_get_stackid:
1625 return &bpf_get_stackid_proto_raw_tp;
Yonghong Songc195651e2018-04-28 22:28:08 -07001626 case BPF_FUNC_get_stack:
1627 return &bpf_get_stack_proto_raw_tp;
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001628 default:
KP Singhfc611f42020-03-29 01:43:49 +01001629 return bpf_tracing_func_proto(func_id, prog);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001630 }
1631}
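/*
 * Usage sketch: a raw tracepoint program resolved by
 * raw_tp_prog_func_proto() above; ctx->args[] is the u64 argument
 * array built by the bpf_trace_run*() stubs further down in this file
 * (the tracepoint name and the argument index are illustrative):
 *
 *	SEC("raw_tracepoint/sched_switch")
 *	int on_switch(struct bpf_raw_tracepoint_args *ctx)
 *	{
 *		bpf_printk("arg1=%llx", ctx->args[1]);
 *		return 0;
 *	}
 */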
1632
Jiri Olsa958a3f22020-05-31 17:42:55 +02001633const struct bpf_func_proto *
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001634tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1635{
Martin KaFai Lau3cee6fb2021-07-01 13:06:19 -07001636 const struct bpf_func_proto *fn;
1637
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001638 switch (func_id) {
1639#ifdef CONFIG_NET
1640 case BPF_FUNC_skb_output:
1641 return &bpf_skb_output_proto;
Eelco Chaudrond831ee82020-03-06 08:59:23 +00001642 case BPF_FUNC_xdp_output:
1643 return &bpf_xdp_output_proto;
Yonghong Songaf7ec132020-06-23 16:08:09 -07001644 case BPF_FUNC_skc_to_tcp6_sock:
1645 return &bpf_skc_to_tcp6_sock_proto;
Yonghong Song478cfbd2020-06-23 16:08:11 -07001646 case BPF_FUNC_skc_to_tcp_sock:
1647 return &bpf_skc_to_tcp_sock_proto;
1648 case BPF_FUNC_skc_to_tcp_timewait_sock:
1649 return &bpf_skc_to_tcp_timewait_sock_proto;
1650 case BPF_FUNC_skc_to_tcp_request_sock:
1651 return &bpf_skc_to_tcp_request_sock_proto;
Yonghong Song0d4fad32020-06-23 16:08:15 -07001652 case BPF_FUNC_skc_to_udp6_sock:
1653 return &bpf_skc_to_udp6_sock_proto;
Hengqi Chen9eeb3aa2021-10-21 21:47:51 +08001654 case BPF_FUNC_skc_to_unix_sock:
1655 return &bpf_skc_to_unix_sock_proto;
Martin KaFai Lau8e4597c2020-11-12 13:13:13 -08001656 case BPF_FUNC_sk_storage_get:
1657 return &bpf_sk_storage_get_tracing_proto;
1658 case BPF_FUNC_sk_storage_delete:
1659 return &bpf_sk_storage_delete_tracing_proto;
Florent Revestb60da492020-12-08 18:36:23 +01001660 case BPF_FUNC_sock_from_file:
1661 return &bpf_sock_from_file_proto;
Florent Revestc5dbb892021-02-10 12:14:03 +01001662 case BPF_FUNC_get_socket_cookie:
1663 return &bpf_get_socket_ptr_cookie_proto;
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001664#endif
Yonghong Song492e6392020-05-09 10:59:14 -07001665 case BPF_FUNC_seq_printf:
1666 return prog->expected_attach_type == BPF_TRACE_ITER ?
1667 &bpf_seq_printf_proto :
1668 NULL;
1669 case BPF_FUNC_seq_write:
1670 return prog->expected_attach_type == BPF_TRACE_ITER ?
1671 &bpf_seq_write_proto :
1672 NULL;
Alan Maguireeb411372020-09-28 12:31:09 +01001673 case BPF_FUNC_seq_printf_btf:
1674 return prog->expected_attach_type == BPF_TRACE_ITER ?
1675 &bpf_seq_printf_btf_proto :
1676 NULL;
Jiri Olsa6e22ab92020-08-25 21:21:20 +02001677 case BPF_FUNC_d_path:
1678 return &bpf_d_path_proto;
Jiri Olsaf92c1e12021-12-08 20:32:44 +01001679 case BPF_FUNC_get_func_arg:
1680 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1681 case BPF_FUNC_get_func_ret:
1682 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1683 case BPF_FUNC_get_func_arg_cnt:
1684 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001685 default:
Martin KaFai Lau3cee6fb2021-07-01 13:06:19 -07001686 fn = raw_tp_prog_func_proto(func_id, prog);
1687 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
1688 fn = bpf_iter_get_func_proto(func_id, prog);
1689 return fn;
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001690 }
1691}
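/*
 * Usage sketch: an fentry program resolved through
 * tracing_prog_func_proto() above, reading its first argument with
 * bpf_get_func_arg(); the attach target "vfs_read" is an illustrative
 * assumption:
 *
 *	SEC("fentry/vfs_read")
 *	int BPF_PROG(on_vfs_read)
 *	{
 *		u64 file_ptr;
 *
 *		if (!bpf_get_func_arg(ctx, 0, &file_ptr))
 *			bpf_printk("file=%llx", file_ptr);
 *		return 0;
 *	}
 */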
1692
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001693static bool raw_tp_prog_is_valid_access(int off, int size,
1694 enum bpf_access_type type,
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001695 const struct bpf_prog *prog,
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001696 struct bpf_insn_access_aux *info)
1697{
Hou Tao35346ab2021-10-25 14:40:23 +08001698 return bpf_tracing_ctx_access(off, size, type);
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001699}
1700
1701static bool tracing_prog_is_valid_access(int off, int size,
1702 enum bpf_access_type type,
1703 const struct bpf_prog *prog,
1704 struct bpf_insn_access_aux *info)
1705{
Hou Tao35346ab2021-10-25 14:40:23 +08001706 return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001707}
1708
KP Singh3e7c67d2020-03-05 23:01:27 +01001709int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1710 const union bpf_attr *kattr,
1711 union bpf_attr __user *uattr)
1712{
1713 return -ENOTSUPP;
1714}
1715
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001716const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1717 .get_func_proto = raw_tp_prog_func_proto,
1718 .is_valid_access = raw_tp_prog_is_valid_access,
1719};
1720
1721const struct bpf_prog_ops raw_tracepoint_prog_ops = {
Yonghong Songebfb4d42020-10-06 23:29:33 -07001722#ifdef CONFIG_NET
Song Liu1b4d60e2020-09-25 13:54:29 -07001723 .test_run = bpf_prog_test_run_raw_tp,
Yonghong Songebfb4d42020-10-06 23:29:33 -07001724#endif
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001725};
1726
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001727const struct bpf_verifier_ops tracing_verifier_ops = {
1728 .get_func_proto = tracing_prog_func_proto,
1729 .is_valid_access = tracing_prog_is_valid_access,
1730};
1731
1732const struct bpf_prog_ops tracing_prog_ops = {
KP Singhda00d2f2020-03-04 20:18:52 +01001733 .test_run = bpf_prog_test_run_tracing,
Alexei Starovoitovf1b95092019-10-30 15:32:11 -07001734};
1735
Matt Mullins9df1c282019-04-26 11:49:47 -07001736static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1737 enum bpf_access_type type,
1738 const struct bpf_prog *prog,
1739 struct bpf_insn_access_aux *info)
1740{
1741 if (off == 0) {
1742 if (size != sizeof(u64) || type != BPF_READ)
1743 return false;
1744 info->reg_type = PTR_TO_TP_BUFFER;
1745 }
1746 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1747}
1748
1749const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1750 .get_func_proto = raw_tp_prog_func_proto,
1751 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1752};
1753
1754const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1755};
1756
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001757static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
Andrey Ignatov5e43f892018-03-30 15:08:00 -07001758 const struct bpf_prog *prog,
Yonghong Song23994632017-06-22 15:07:39 -07001759 struct bpf_insn_access_aux *info)
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001760{
Teng Qin95da0cd2018-03-06 10:55:01 -08001761 const int size_u64 = sizeof(u64);
Yonghong Song31fd8582017-06-13 15:52:13 -07001762
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001763 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1764 return false;
1765 if (type != BPF_READ)
1766 return false;
Daniel Borkmannbc231052018-06-02 23:06:39 +02001767 if (off % size != 0) {
1768 if (sizeof(unsigned long) != 4)
1769 return false;
1770 if (size != 8)
1771 return false;
1772 if (off % size != 4)
1773 return false;
1774 }
Yonghong Song31fd8582017-06-13 15:52:13 -07001775
Daniel Borkmannf96da092017-07-02 02:13:27 +02001776 switch (off) {
1777 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
Teng Qin95da0cd2018-03-06 10:55:01 -08001778 bpf_ctx_record_field_size(info, size_u64);
1779 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1780 return false;
1781 break;
1782 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1783 bpf_ctx_record_field_size(info, size_u64);
1784 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
Yonghong Song23994632017-06-22 15:07:39 -07001785 return false;
Daniel Borkmannf96da092017-07-02 02:13:27 +02001786 break;
1787 default:
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001788 if (size != sizeof(long))
1789 return false;
1790 }
Daniel Borkmannf96da092017-07-02 02:13:27 +02001791
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001792 return true;
1793}
1794
Daniel Borkmann6b8cc1d2017-01-12 11:51:32 +01001795static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1796 const struct bpf_insn *si,
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001797 struct bpf_insn *insn_buf,
Daniel Borkmannf96da092017-07-02 02:13:27 +02001798 struct bpf_prog *prog, u32 *target_size)
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001799{
1800 struct bpf_insn *insn = insn_buf;
1801
Daniel Borkmann6b8cc1d2017-01-12 11:51:32 +01001802 switch (si->off) {
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001803 case offsetof(struct bpf_perf_event_data, sample_period):
Daniel Borkmannf035a512016-09-09 02:45:29 +02001804 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
Daniel Borkmann6b8cc1d2017-01-12 11:51:32 +01001805 data), si->dst_reg, si->src_reg,
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001806 offsetof(struct bpf_perf_event_data_kern, data));
Daniel Borkmann6b8cc1d2017-01-12 11:51:32 +01001807 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
Daniel Borkmannf96da092017-07-02 02:13:27 +02001808 bpf_target_off(struct perf_sample_data, period, 8,
1809 target_size));
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001810 break;
Teng Qin95da0cd2018-03-06 10:55:01 -08001811 case offsetof(struct bpf_perf_event_data, addr):
1812 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1813 data), si->dst_reg, si->src_reg,
1814 offsetof(struct bpf_perf_event_data_kern, data));
1815 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1816 bpf_target_off(struct perf_sample_data, addr, 8,
1817 target_size));
1818 break;
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001819 default:
Daniel Borkmannf035a512016-09-09 02:45:29 +02001820 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
Daniel Borkmann6b8cc1d2017-01-12 11:51:32 +01001821 regs), si->dst_reg, si->src_reg,
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001822 offsetof(struct bpf_perf_event_data_kern, regs));
Daniel Borkmann6b8cc1d2017-01-12 11:51:32 +01001823 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1824 si->off);
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001825 break;
1826 }
1827
1828 return insn - insn_buf;
1829}
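/*
 * Rewrite sketch for pe_prog_convert_ctx_access() above: a program
 * load such as
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period));
 *
 * is turned into two loads that follow the kernel-side
 * bpf_perf_event_data_kern layout (register names are illustrative):
 *
 *	r0 = *(struct perf_sample_data **)(r1 + offsetof(struct bpf_perf_event_data_kern, data));
 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period));
 */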
1830
Jakub Kicinski7de16e32017-10-16 16:40:53 -07001831const struct bpf_verifier_ops perf_event_verifier_ops = {
Yonghong Songf005afe2018-03-20 11:19:17 -07001832 .get_func_proto = pe_prog_func_proto,
Alexei Starovoitov0515e592016-09-01 18:37:22 -07001833 .is_valid_access = pe_prog_is_valid_access,
1834 .convert_ctx_access = pe_prog_convert_ctx_access,
1835};
Jakub Kicinski7de16e32017-10-16 16:40:53 -07001836
1837const struct bpf_prog_ops perf_event_prog_ops = {
1838};
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001839
1840static DEFINE_MUTEX(bpf_event_mutex);
1841
Yonghong Songc8c088b2017-11-30 13:47:54 -08001842#define BPF_TRACE_MAX_PROGS 64
1843
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001844int perf_event_attach_bpf_prog(struct perf_event *event,
Andrii Nakryiko82e6b1e2021-08-15 00:05:58 -07001845 struct bpf_prog *prog,
1846 u64 bpf_cookie)
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001847{
Stanislav Fomicheve672db02019-05-28 14:14:44 -07001848 struct bpf_prog_array *old_array;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001849 struct bpf_prog_array *new_array;
1850 int ret = -EEXIST;
1851
Josef Bacik9802d862017-12-11 11:36:48 -05001852 /*
Masami Hiramatsub4da3342018-01-13 02:54:04 +09001853	 * Kprobe override only works if the kprobe is placed on the function entry,
 1854	 * and only if the target function is on the error-injection opt-in list.
Josef Bacik9802d862017-12-11 11:36:48 -05001855 */
1856 if (prog->kprobe_override &&
Masami Hiramatsub4da3342018-01-13 02:54:04 +09001857 (!trace_kprobe_on_func_entry(event->tp_event) ||
Josef Bacik9802d862017-12-11 11:36:48 -05001858 !trace_kprobe_error_injectable(event->tp_event)))
1859 return -EINVAL;
1860
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001861 mutex_lock(&bpf_event_mutex);
1862
1863 if (event->prog)
Yonghong Song07c41a22017-10-30 13:50:22 -07001864 goto unlock;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001865
Stanislav Fomicheve672db02019-05-28 14:14:44 -07001866 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
Yonghong Songc8c088b2017-11-30 13:47:54 -08001867 if (old_array &&
1868 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1869 ret = -E2BIG;
1870 goto unlock;
1871 }
1872
Andrii Nakryiko82e6b1e2021-08-15 00:05:58 -07001873 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001874 if (ret < 0)
Yonghong Song07c41a22017-10-30 13:50:22 -07001875 goto unlock;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001876
1877 /* set the new array to event->tp_event and set event->prog */
1878 event->prog = prog;
Andrii Nakryiko82e6b1e2021-08-15 00:05:58 -07001879 event->bpf_cookie = bpf_cookie;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001880 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1881 bpf_prog_array_free(old_array);
1882
Yonghong Song07c41a22017-10-30 13:50:22 -07001883unlock:
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001884 mutex_unlock(&bpf_event_mutex);
1885 return ret;
1886}
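/*
 * Usage sketch from user space: perf_event_attach_bpf_prog() above is
 * reached via the PERF_EVENT_IOC_SET_BPF ioctl on a perf event file
 * descriptor (error handling omitted; pfd and prog_fd are assumed to
 * be valid descriptors obtained elsewhere):
 *
 *	ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0);
 */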
1887
1888void perf_event_detach_bpf_prog(struct perf_event *event)
1889{
Stanislav Fomicheve672db02019-05-28 14:14:44 -07001890 struct bpf_prog_array *old_array;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001891 struct bpf_prog_array *new_array;
1892 int ret;
1893
1894 mutex_lock(&bpf_event_mutex);
1895
1896 if (!event->prog)
Yonghong Song07c41a22017-10-30 13:50:22 -07001897 goto unlock;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001898
Stanislav Fomicheve672db02019-05-28 14:14:44 -07001899 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
Andrii Nakryiko82e6b1e2021-08-15 00:05:58 -07001900 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
Sean Young170a7e32018-05-27 12:24:08 +01001901 if (ret == -ENOENT)
1902 goto unlock;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001903 if (ret < 0) {
1904 bpf_prog_array_delete_safe(old_array, event->prog);
1905 } else {
1906 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1907 bpf_prog_array_free(old_array);
1908 }
1909
1910 bpf_prog_put(event->prog);
1911 event->prog = NULL;
1912
Yonghong Song07c41a22017-10-30 13:50:22 -07001913unlock:
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001914 mutex_unlock(&bpf_event_mutex);
1915}
Yonghong Songf371b302017-12-11 11:39:02 -08001916
Yonghong Songf4e22982017-12-13 10:35:37 -08001917int perf_event_query_prog_array(struct perf_event *event, void __user *info)
Yonghong Songf371b302017-12-11 11:39:02 -08001918{
1919 struct perf_event_query_bpf __user *uquery = info;
1920 struct perf_event_query_bpf query = {};
Stanislav Fomicheve672db02019-05-28 14:14:44 -07001921 struct bpf_prog_array *progs;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001922 u32 *ids, prog_cnt, ids_len;
Yonghong Songf371b302017-12-11 11:39:02 -08001923 int ret;
1924
Alexey Budankov031258d2020-04-02 11:48:54 +03001925 if (!perfmon_capable())
Yonghong Songf371b302017-12-11 11:39:02 -08001926 return -EPERM;
1927 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1928 return -EINVAL;
1929 if (copy_from_user(&query, uquery, sizeof(query)))
1930 return -EFAULT;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001931
1932 ids_len = query.ids_len;
1933 if (ids_len > BPF_TRACE_MAX_PROGS)
Daniel Borkmann9c481b92018-02-14 15:31:00 +01001934 return -E2BIG;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001935 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1936 if (!ids)
1937 return -ENOMEM;
1938 /*
1939 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
 1940	 * is required when the user only wants to check for uquery->prog_cnt.
1941 * There is no need to check for it since the case is handled
1942 * gracefully in bpf_prog_array_copy_info.
1943 */
Yonghong Songf371b302017-12-11 11:39:02 -08001944
1945 mutex_lock(&bpf_event_mutex);
Stanislav Fomicheve672db02019-05-28 14:14:44 -07001946 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1947 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
Yonghong Songf371b302017-12-11 11:39:02 -08001948 mutex_unlock(&bpf_event_mutex);
1949
Yonghong Song3a38bb92018-04-10 09:37:32 -07001950 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1951 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1952 ret = -EFAULT;
1953
1954 kfree(ids);
Yonghong Songf371b302017-12-11 11:39:02 -08001955 return ret;
1956}
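/*
 * Usage sketch from user space for the query path above (the 16-id
 * array size is an illustrative choice):
 *
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + 16 * sizeof(__u32));
 *	q->ids_len = 16;
 *	ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, q);
 *	// on success q->prog_cnt holds the number of attached programs
 *	// and q->ids[] the first ids_len program ids
 */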
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001957
1958extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1959extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1960
Matt Mullinsa38d1102018-12-12 16:42:37 -08001961struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001962{
1963 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1964
1965 for (; btp < __stop__bpf_raw_tp; btp++) {
1966 if (!strcmp(btp->tp->name, name))
1967 return btp;
1968 }
Matt Mullinsa38d1102018-12-12 16:42:37 -08001969
1970 return bpf_get_raw_tracepoint_module(name);
1971}
1972
1973void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1974{
Andrii Nakryiko12cc1262020-12-03 12:46:21 -08001975 struct module *mod;
Matt Mullinsa38d1102018-12-12 16:42:37 -08001976
Andrii Nakryiko12cc1262020-12-03 12:46:21 -08001977 preempt_disable();
1978 mod = __module_address((unsigned long)btp);
1979 module_put(mod);
1980 preempt_enable();
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001981}
1982
1983static __always_inline
1984void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1985{
Thomas Gleixnerf03efe42020-02-24 15:01:35 +01001986 cant_sleep();
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001987 rcu_read_lock();
Andrii Nakryikofb7dd8b2021-08-15 00:05:54 -07001988 (void) bpf_prog_run(prog, args);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001989 rcu_read_unlock();
1990}
1991
1992#define UNPACK(...) __VA_ARGS__
1993#define REPEAT_1(FN, DL, X, ...) FN(X)
1994#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1995#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1996#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1997#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1998#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1999#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2000#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2001#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2002#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2003#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2004#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2005#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
2006
2007#define SARG(X) u64 arg##X
2008#define COPY(X) args[X] = arg##X
2009
2010#define __DL_COM (,)
2011#define __DL_SEM (;)
2012
2013#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2014
2015#define BPF_TRACE_DEFN_x(x) \
2016 void bpf_trace_run##x(struct bpf_prog *prog, \
2017 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
2018 { \
2019 u64 args[x]; \
2020 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
2021 __bpf_trace_run(prog, args); \
2022 } \
2023 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2024BPF_TRACE_DEFN_x(1);
2025BPF_TRACE_DEFN_x(2);
2026BPF_TRACE_DEFN_x(3);
2027BPF_TRACE_DEFN_x(4);
2028BPF_TRACE_DEFN_x(5);
2029BPF_TRACE_DEFN_x(6);
2030BPF_TRACE_DEFN_x(7);
2031BPF_TRACE_DEFN_x(8);
2032BPF_TRACE_DEFN_x(9);
2033BPF_TRACE_DEFN_x(10);
2034BPF_TRACE_DEFN_x(11);
2035BPF_TRACE_DEFN_x(12);
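/*
 * Expansion sketch: for x = 2 the BPF_TRACE_DEFN_x() macro above
 * generates (whitespace rearranged for readability):
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */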
2036
2037static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2038{
2039 struct tracepoint *tp = btp->tp;
2040
2041 /*
 2042	 * check that the program doesn't access arguments beyond what's
2043 * available in this tracepoint
2044 */
2045 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2046 return -EINVAL;
2047
Matt Mullins9df1c282019-04-26 11:49:47 -07002048 if (prog->aux->max_tp_access > btp->writable_size)
2049 return -EINVAL;
2050
Steven Rostedt (VMware)9913d572021-06-29 09:40:10 -04002051 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2052 prog);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07002053}
2054
2055int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2056{
Alexei Starovoitove16ec342019-01-30 18:12:44 -08002057 return __bpf_probe_register(btp, prog);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07002058}
2059
2060int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2061{
Alexei Starovoitove16ec342019-01-30 18:12:44 -08002062 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07002063}
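/*
 * Usage sketch from user space: bpf_probe_register()/bpf_probe_unregister()
 * above are driven by the BPF_RAW_TRACEPOINT_OPEN command; the returned
 * fd keeps the program attached until it is closed (the tracepoint name
 * and prog_fd are illustrative assumptions):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	tp_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */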
Yonghong Song41bdc4b2018-05-24 11:21:09 -07002064
2065int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2066 u32 *fd_type, const char **buf,
2067 u64 *probe_offset, u64 *probe_addr)
2068{
2069 bool is_tracepoint, is_syscall_tp;
2070 struct bpf_prog *prog;
2071 int flags, err = 0;
2072
2073 prog = event->prog;
2074 if (!prog)
2075 return -ENOENT;
2076
2077 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2078 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2079 return -EOPNOTSUPP;
2080
2081 *prog_id = prog->aux->id;
2082 flags = event->tp_event->flags;
2083 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2084 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2085
2086 if (is_tracepoint || is_syscall_tp) {
2087 *buf = is_tracepoint ? event->tp_event->tp->name
2088 : event->tp_event->name;
2089 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2090 *probe_offset = 0x0;
2091 *probe_addr = 0x0;
2092 } else {
2093 /* kprobe/uprobe */
2094 err = -EOPNOTSUPP;
2095#ifdef CONFIG_KPROBE_EVENTS
2096 if (flags & TRACE_EVENT_FL_KPROBE)
2097 err = bpf_get_kprobe_info(event, fd_type, buf,
2098 probe_offset, probe_addr,
2099 event->attr.type == PERF_TYPE_TRACEPOINT);
2100#endif
2101#ifdef CONFIG_UPROBE_EVENTS
2102 if (flags & TRACE_EVENT_FL_UPROBE)
2103 err = bpf_get_uprobe_info(event, fd_type, buf,
2104 probe_offset,
2105 event->attr.type == PERF_TYPE_TRACEPOINT);
2106#endif
2107 }
2108
2109 return err;
2110}
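/*
 * Usage sketch from user space: bpf_get_perf_event_info() above backs
 * the BPF_TASK_FD_QUERY command (pid and pfd are assumed to identify a
 * task and one of its perf event fds):
 *
 *	union bpf_attr attr = {};
 *	char buf[256];
 *
 *	attr.task_fd_query.pid = pid;
 *	attr.task_fd_query.fd = pfd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *	attr.task_fd_query.buf_len = sizeof(buf);
 *	syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *	// attr.task_fd_query.prog_id / fd_type / probe_offset / probe_addr
 *	// are filled in on success; buf receives the probed name
 */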
Matt Mullinsa38d1102018-12-12 16:42:37 -08002111
Yonghong Song9db1ff02019-06-25 17:35:03 -07002112static int __init send_signal_irq_work_init(void)
2113{
2114 int cpu;
2115 struct send_signal_irq_work *work;
2116
2117 for_each_possible_cpu(cpu) {
2118 work = per_cpu_ptr(&send_signal_work, cpu);
2119 init_irq_work(&work->irq_work, do_bpf_send_signal);
2120 }
2121 return 0;
2122}
2123
2124subsys_initcall(send_signal_irq_work_init);
2125
Matt Mullinsa38d1102018-12-12 16:42:37 -08002126#ifdef CONFIG_MODULES
Stanislav Fomichev390e99c2019-05-13 12:04:36 -07002127static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2128 void *module)
Matt Mullinsa38d1102018-12-12 16:42:37 -08002129{
2130 struct bpf_trace_module *btm, *tmp;
2131 struct module *mod = module;
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02002132 int ret = 0;
Matt Mullinsa38d1102018-12-12 16:42:37 -08002133
2134 if (mod->num_bpf_raw_events == 0 ||
2135 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02002136 goto out;
Matt Mullinsa38d1102018-12-12 16:42:37 -08002137
2138 mutex_lock(&bpf_module_mutex);
2139
2140 switch (op) {
2141 case MODULE_STATE_COMING:
2142 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2143 if (btm) {
2144 btm->module = module;
2145 list_add(&btm->list, &bpf_trace_modules);
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02002146 } else {
2147 ret = -ENOMEM;
Matt Mullinsa38d1102018-12-12 16:42:37 -08002148 }
2149 break;
2150 case MODULE_STATE_GOING:
2151 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2152 if (btm->module == module) {
2153 list_del(&btm->list);
2154 kfree(btm);
2155 break;
2156 }
2157 }
2158 break;
2159 }
2160
2161 mutex_unlock(&bpf_module_mutex);
2162
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02002163out:
2164 return notifier_from_errno(ret);
Matt Mullinsa38d1102018-12-12 16:42:37 -08002165}
2166
2167static struct notifier_block bpf_module_nb = {
2168 .notifier_call = bpf_event_notify,
2169};
2170
Stanislav Fomichev390e99c2019-05-13 12:04:36 -07002171static int __init bpf_event_init(void)
Matt Mullinsa38d1102018-12-12 16:42:37 -08002172{
2173 register_module_notifier(&bpf_module_nb);
2174 return 0;
2175}
2176
2177fs_initcall(bpf_event_init);
2178#endif /* CONFIG_MODULES */