#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"

#include "util/bpf_skel/func_latency.skel.h"

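/* handle to the BPF skeleton generated from util/bpf_skel/func_latency.bpf.c */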
static struct func_latency_bpf *skel;

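/*
 * Open and load the skeleton, size and fill the CPU/task filter maps, and
 * attach the entry/exit probes to the target function.  The expected call
 * sequence is prepare -> start -> stop -> read -> cleanup.
 */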
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

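	/* the latency histogram tracks exactly one target function */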
	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

	func = list_first_entry(&ftrace->filters, struct filter_entry, list);

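	/* open without loading so the map sizes below can still be adjusted */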
	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

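	/*
	 * A task filter is needed when specific tasks are targeted;
	 * target__none() covers the default run-a-workload case.
	 */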
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

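	/* raise the locked-memory rlimit so BPF map creation doesn't fail */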
	set_max_rlimit();

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}

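	/* now that the maps exist, mark each targeted CPU in the cpu filter */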
	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

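	/* likewise, mark each targeted thread in the task filter */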
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

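	/*
	 * func_begin/func_end are attached as a kprobe/kretprobe pair on the
	 * target function; the bool argument of bpf_program__attach_kprobe()
	 * selects the return probe.
	 */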
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							    false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							  true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}

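/* start/stop just flip a flag that the BPF programs check on each probe hit */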
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}

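/*
 * Collapse the per-CPU histogram in the 'latency' map into the caller's
 * buckets[] array, one entry per latency bucket.
 */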
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[])
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);

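	/*
	 * 'latency' is a per-CPU map: a single lookup returns one value for
	 * each possible CPU, so size the scratch buffer accordingly.
	 */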
	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;

	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}

	free(hist);
	return 0;
}

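/* detach the probes and free all skeleton resources */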
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}