// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.skel.h"

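/*
 * Software event that fires on every cgroup switch.  A sample period
 * of 1 makes the attached BPF program run at each switch, so counter
 * deltas can be charged to the outgoing cgroup.
 */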
static struct perf_event_attr cgrp_switch_attr = {
	.type = PERF_TYPE_SOFTWARE,
	.config = PERF_COUNT_SW_CGROUP_SWITCHES,
	.size = sizeof(cgrp_switch_attr),
	.sample_period = 1,
	.disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;

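/* perf event fd of @evt for the @cpu-th entry of its cpu map */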
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

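/*
 * Open and load the bperf cgroup skeleton, attach its switch handler
 * to a cgroup-switch event on every cpu, open one copy of the real
 * events (for the first cgroup only) and publish their fds in the
 * 'events' map, and record each cgroup id in the 'cgrp_idx' map.
 */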
static int bperf_load_program(struct evlist *evlist)
{
	struct bpf_link *link;
	struct evsel *evsel;
	struct cgroup *cgrp, *leader_cgrp;
	__u32 i, cpu;
	__u32 nr_cpus = evlist->core.all_cpus->nr;
	int total_cpus = cpu__max_cpu().cpu;
	int map_size, map_fd;
	int prog_fd, err;

	skel = bperf_cgroup_bpf__open();
	if (!skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return -1;
	}

	skel->rodata->num_cpus = total_cpus;
	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;

	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);

	/* we need one copy of events per cpu for reading */
	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.events, map_size);
	bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
	/* previous result is saved in a per-cpu array */
	map_size = evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
	/* cgroup result needs all events (per-cpu) */
	map_size = evlist->core.nr_entries;
	bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);

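	/*
	 * Bump RLIMIT_MEMLOCK so that map creation is not rejected on
	 * kernels that charge BPF maps against the memlock limit
	 * instead of memcg.
	 */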
	set_max_rlimit();

	err = bperf_cgroup_bpf__load(skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton\n");
		goto out;
	}

	if (cgroup_is_v2("perf_event") > 0)
		skel->bss->use_cgroup_v2 = 1;

	err = -1;

	cgrp_switch = evsel__new(&cgrp_switch_attr);
	if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
		pr_err("Failed to open cgroup switches event\n");
		goto out;
	}

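	/*
	 * Attach the switch handler to the cgroup-switch event on each
	 * cpu, using the perf event fd as the attach target.
	 */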
	for (i = 0; i < nr_cpus; i++) {
		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
						      FD(cgrp_switch, i));
		if (IS_ERR(link)) {
			pr_err("Failed to attach cgroup program\n");
			err = PTR_ERR(link);
			goto out;
		}
	}

	/*
	 * Update cgrp_idx map from cgroup-id to event index.
	 */
	cgrp = NULL;
	i = 0;

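	/*
	 * The evlist holds all events of the first cgroup first,
	 * followed by the same events repeated for each remaining
	 * cgroup.  Only the first cgroup's copies open real (per-cpu)
	 * perf events; the BPF program reads those and accounts the
	 * deltas to whichever cgroup is current.
	 */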
	evlist__for_each_entry(evlist, evsel) {
		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
			leader_cgrp = evsel->cgrp;
			evsel->cgrp = NULL;

			/* open single copy of the events w/o cgroup */
			err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
			if (err) {
				pr_err("Failed to open first cgroup events\n");
				goto out;
			}

			map_fd = bpf_map__fd(skel->maps.events);
			for (cpu = 0; cpu < nr_cpus; cpu++) {
				int fd = FD(evsel, cpu);
				__u32 idx = evsel->core.idx * total_cpus +
					evlist->core.all_cpus->map[cpu].cpu;

				err = bpf_map_update_elem(map_fd, &idx, &fd,
							  BPF_ANY);
				if (err < 0) {
					pr_err("Failed to update perf_event fd\n");
					goto out;
				}
			}

			evsel->cgrp = leader_cgrp;
		}
		evsel->supported = true;

		if (evsel->cgrp == cgrp)
			continue;

		cgrp = evsel->cgrp;

		if (read_cgroup_id(cgrp) < 0) {
			pr_err("Failed to get cgroup id\n");
			err = -1;
			goto out;
		}

		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
		if (err < 0) {
			pr_err("Failed to update cgroup index map\n");
			goto out;
		}

		i++;
	}

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings.  Check
	 * whether the kernel supports it.
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		err = 0;
	}

out:
	return err;
}

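/*
 * Load the cgroup program once, when the first evsel is set up; all
 * evsels share the same skeleton.
 */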
static int bperf_cgrp__load(struct evsel *evsel,
			    struct target *target __maybe_unused)
{
	static bool bperf_loaded = false;

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	if (!bperf_loaded && bperf_load_program(evsel->evlist))
		return -1;

	bperf_loaded = true;
	/* just to bypass bpf_counter_skip() */
	evsel->follower_skel = (struct bperf_follower_bpf *)skel;

	return 0;
}

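/*
 * The perf event fds were already placed in the 'events' map by
 * bperf_load_program(), so there is nothing to install here.
 */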
static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
				  int cpu __maybe_unused, int fd __maybe_unused)
{
	/* nothing to do */
	return 0;
}

/*
 * Trigger the leader prog on each cpu, so that the cgrp_readings map
 * gets the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
	int i, cpu;
	int nr_cpus = evlist->core.all_cpus->nr;
	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

	for (i = 0; i < nr_cpus; i++) {
		cpu = evlist->core.all_cpus->map[i].cpu;
		bperf_trigger_reading(prog_fd, cpu);
	}

	return 0;
}

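/*
 * Counting is controlled by a single global flag in the BPF program,
 * so enable, disable, read and destroy act only on the first evsel
 * (core.idx == 0) and are no-ops for the rest.
 */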
static int bperf_cgrp__enable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 1;
	return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 0;
	return 0;
}

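/*
 * Read the per-cgroup results: each evsel's slot in the
 * 'cgrp_readings' map holds one bpf_perf_event_value per possible
 * cpu, indexed by the evsel's position in the evlist.
 */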
static int bperf_cgrp__read(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	int i, cpu, nr_cpus = evlist->core.all_cpus->nr;
	int total_cpus = cpu__max_cpu().cpu;
	struct perf_counts_values *counts;
	struct bpf_perf_event_value *values;
	int reading_map_fd, err = 0;
	__u32 idx;

	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	values = calloc(total_cpus, sizeof(*values));
	if (values == NULL)
		return -ENOMEM;

	reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);

	evlist__for_each_entry(evlist, evsel) {
		idx = evsel->core.idx;
		err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
		if (err) {
			pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
			       idx, evsel__name(evsel), evsel->cgrp->name);
			goto out;
		}

		for (i = 0; i < nr_cpus; i++) {
			cpu = evlist->core.all_cpus->map[i].cpu;

			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu].counter;
			counts->ena = values[cpu].enabled;
			counts->run = values[cpu].running;
		}
	}

out:
	free(values);
	return err;
}

static int bperf_cgrp__destroy(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgroup_bpf__destroy(skel);
	evsel__delete(cgrp_switch); // it'll destroy the attached on_cgrp_switch progs too

	return 0;
}

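/*
 * Counter ops used by 'perf stat' when BPF counters are combined with
 * per-cgroup mode, e.g.:
 *
 *   perf stat --bpf-counters --for-each-cgroup foo,bar -a sleep 1
 */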
struct bpf_counter_ops bperf_cgrp_ops = {
	.load = bperf_cgrp__load,
	.enable = bperf_cgrp__enable,
	.disable = bperf_cgrp__disable,
	.read = bperf_cgrp__read,
	.install_pe = bperf_cgrp__install_pe,
	.destroy = bperf_cgrp__destroy,
};