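/*
 * bpf_ftrace.c - BPF backend for 'perf ftrace latency'
 *
 * Measures the latency of a single kernel function by attaching
 * entry/exit probes from the func_latency BPF skeleton and reading
 * back a per-CPU latency histogram.
 */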
#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"

#include "util/bpf_skel/func_latency.skel.h"

static struct func_latency_bpf *skel;

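/*
 * Open and load the func_latency skeleton, size and fill the optional
 * CPU/task filter maps from the target, and attach the entry/exit
 * probes to the (single) requested function.
 */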
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

	func = list_first_entry(&ftrace->filters, struct filter_entry, list);

	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

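	/* likewise, size the task filter map when a task (or default) target is given */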
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

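	/* bump the memlock rlimit before creating the BPF maps and programs */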
	set_max_rlimit();

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}

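	/* mark the allowed CPUs in the cpu_filter map */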
	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

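	/* mark the target thread IDs in the task_filter map */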
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

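	/* attach entry (kprobe) and exit (kretprobe) programs to the target function */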
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							     false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							   true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}

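/* let the attached BPF programs start accounting latency */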
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

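/* stop accounting new events by clearing the enabled flag */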
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}

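/*
 * Read the latency histogram back from the BPF map.  Each bucket holds
 * one value per possible CPU, so the per-CPU counts are summed into the
 * caller-provided buckets[] array.
 */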
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[])
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);

	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;

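	/* a failed lookup leaves the bucket empty; otherwise sum the per-CPU counts */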
	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}

	free(hist);
	return 0;
}

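/* detach the probes and free the skeleton's maps and links */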
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}