/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as follows:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * A bpf program is already running on this cpu, so don't
		 * recurse into another bpf program (same or different)
		 * and don't send a kprobe event into the ring buffer;
		 * just return zero here.
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
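
/*
 * Illustrative sketch (not part of this file): roughly how the kprobe
 * perf path in kernel/trace/trace_kprobe.c consumes the return value of
 * trace_call_bpf(). Details are simplified for illustration; the point
 * is that a zero return filters the event out before it is recorded:
 *
 *	static void kprobe_perf_func(struct trace_kprobe *tk,
 *				     struct pt_regs *regs)
 *	{
 *		struct bpf_prog *prog = tk->tp.call.prog;
 *
 *		if (prog && !trace_call_bpf(prog, regs))
 *			return;		// program returned 0: drop event
 *		// ... otherwise record the kprobe event as usual ...
 *	}
 */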

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
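
/*
 * Usage sketch (illustrative; this would live in a separately compiled
 * BPF program, not in this file). Because dst is ARG_PTR_TO_UNINIT_MEM,
 * the verifier accepts an uninitialized stack buffer: the helper either
 * fills it or zeroes it on failure. The attach point is hypothetical:
 *
 *	SEC("kprobe/...")
 *	int prog(struct pt_regs *ctx)
 *	{
 *		struct task_struct *task;
 *		int pid = 0;
 *
 *		task = (struct task_struct *) bpf_get_current_task();
 *		bpf_probe_read(&pid, sizeof(pid), &task->pid);
 *		return 0;
 *	}
 */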

BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
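
/*
 * Usage sketch (a hedged, assumption-laden example, not kernel code):
 * from process context a program may patch a buffer the traced task
 * itself handed in, e.g. a user pointer captured at a syscall kprobe.
 * Per the checks above, any call from interrupt context, from a
 * kthread, or against a kernel-space target fails with -EPERM:
 *
 *	// user_buf is a user-space pointer read from the syscall args
 *	long err = bpf_probe_write_user(user_buf, &replacement,
 *					sizeof(replacement));
 */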
123
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700124/*
125 * limited trace_printk()
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700126 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700127 */
Daniel Borkmannf3694e02016-09-09 02:45:31 +0200128BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
129 u64, arg2, u64, arg3)
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700130{
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700131 bool str_seen = false;
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700132 int mod[3] = {};
133 int fmt_cnt = 0;
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700134 u64 unsafe_addr;
135 char buf[64];
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700136 int i;
137
138 /*
139 * bpf_check()->check_func_arg()->check_stack_boundary()
140 * guarantees that fmt points to bpf program stack,
141 * fmt_size bytes of it were initialized and fmt_size > 0
142 */
143 if (fmt[--fmt_size] != 0)
144 return -EINVAL;
145
146 /* check format string for allowed specifiers */
147 for (i = 0; i < fmt_size; i++) {
148 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
149 return -EINVAL;
150
151 if (fmt[i] != '%')
152 continue;
153
154 if (fmt_cnt >= 3)
155 return -EINVAL;
156
157 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
158 i++;
159 if (fmt[i] == 'l') {
160 mod[fmt_cnt]++;
161 i++;
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700162 } else if (fmt[i] == 'p' || fmt[i] == 's') {
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700163 mod[fmt_cnt]++;
164 i++;
165 if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
166 return -EINVAL;
167 fmt_cnt++;
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700168 if (fmt[i - 1] == 's') {
169 if (str_seen)
170 /* allow only one '%s' per fmt string */
171 return -EINVAL;
172 str_seen = true;
173
174 switch (fmt_cnt) {
175 case 1:
Daniel Borkmannf3694e02016-09-09 02:45:31 +0200176 unsafe_addr = arg1;
177 arg1 = (long) buf;
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700178 break;
179 case 2:
Daniel Borkmannf3694e02016-09-09 02:45:31 +0200180 unsafe_addr = arg2;
181 arg2 = (long) buf;
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700182 break;
183 case 3:
Daniel Borkmannf3694e02016-09-09 02:45:31 +0200184 unsafe_addr = arg3;
185 arg3 = (long) buf;
Alexei Starovoitov8d3b7dc2015-08-28 15:56:23 -0700186 break;
187 }
188 buf[0] = 0;
189 strncpy_from_unsafe(buf,
190 (void *) (long) unsafe_addr,
191 sizeof(buf));
192 }
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700193 continue;
194 }
195
196 if (fmt[i] == 'l') {
197 mod[fmt_cnt]++;
198 i++;
199 }
200
201 if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
202 return -EINVAL;
203 fmt_cnt++;
204 }
205
206 return __trace_printk(1/* fake ip will not be printed */, fmt,
Daniel Borkmannf3694e02016-09-09 02:45:31 +0200207 mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
208 mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
209 mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700210}
211
212static const struct bpf_func_proto bpf_trace_printk_proto = {
213 .func = bpf_trace_printk,
214 .gpl_only = true,
215 .ret_type = RET_INTEGER,
Alexei Starovoitov39f19ebb2017-01-09 10:19:50 -0800216 .arg1_type = ARG_PTR_TO_MEM,
217 .arg2_type = ARG_CONST_SIZE,
Alexei Starovoitov9c959c82015-03-25 12:49:22 -0700218};
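
/*
 * Usage sketch (illustrative BPF-side code, not part of this file):
 * at most three arguments, only the specifiers listed above, and a
 * single %s per format string. The output lands in the trace buffer,
 * readable via /sys/kernel/debug/tracing/trace_pipe:
 *
 *	char fmt[] = "pid %d comm %s\n";	// NUL-terminated, on the stack
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 */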

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	u64 value = 0;
	int err;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	err = perf_event_read_local(ee->event, &value);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
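
/*
 * Usage sketch (illustrative; "counters" is assumed to be a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY populated with perf event FDs from
 * user space). Because errors share the return channel with counter
 * values, small negative results are ambiguous, as noted above:
 *
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *	if ((s64) cnt < 0 && (s64) cnt >= -EINVAL)
 *		;	// could be an errno or a legitimate counter value
 */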

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
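
/*
 * Usage sketch (illustrative BPF-side code; "events" is assumed to be
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose entries were opened with
 * PERF_COUNT_SW_BPF_OUTPUT, matching the attr checks in
 * __bpf_perf_event_output() above):
 *
 *	struct data_t data = { .pid = pid };	// hypothetical struct
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */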

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
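
/*
 * Usage sketch (illustrative; "cgroup_map" is assumed to be a
 * BPF_MAP_TYPE_CGROUP_ARRAY with slot 0 filled from user space).
 * Per task_under_cgroup_hierarchy() above, the helper returns nonzero
 * when current sits inside the stored cgroup's hierarchy and zero
 * otherwise, with negative errnos for the failure cases:
 *
 *	if (bpf_current_task_under_cgroup(&cgroup_map, 0) != 1)
 *		return 0;	// skip tasks outside the cgroup
 */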

BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
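
/*
 * Usage sketch (illustrative): on success the returned length includes
 * the trailing NUL, so it can be fed straight to bpf_perf_event_output()
 * as the comment above suggests. "filename" stands in for an unsafe
 * pointer taken from a probed function's arguments:
 *
 *	char buf[64];
 *	int len = bpf_probe_read_str(buf, sizeof(buf), filename);
 *
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      buf, len);
 */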

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
		if (size != sizeof(u64))
			return false;
	} else {
		if (size != sizeof(long))
			return false;
	}
	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      offsetof(struct perf_sample_data, period));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
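
/*
 * Sketch of the effect (illustrative pseudo-BPF, not emitted verbatim):
 * a program load such as
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period));
 *
 * is rewritten into two loads that chase the kernel-side pointer:
 *
 *	r0 = *(r1 + offsetof(struct bpf_perf_event_data_kern, data));
 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period));
 *
 * Accesses at any other offset are redirected through the 'regs'
 * pointer in the same way.
 */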

const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};