// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c and builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

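/*
 * Debug helper: format the CPU mask bitmap into a string and print it via
 * pr_debug(), prefixed with a tag identifying which mask is being dumped.
 */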
#define MASK_SIZE 1023
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

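/*
 * Default no-op implementations of the AUX area tracing mmap hooks. They are
 * declared __weak so that the real implementations, provided when auxtrace
 * support is built in, override them at link time.
 */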
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

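/*
 * AIO support: when asynchronous trace writing is enabled (nr_cblocks > 0,
 * e.g. via 'perf record --aio'), each mmap gets extra data buffers and POSIX
 * AIO control blocks so ring buffer contents can be written out asynchronously.
 */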
#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

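/*
 * Bind an AIO data buffer to the NUMA node of the CPU this map belongs to,
 * so that copying out of the kernel ring buffer stays node-local. Only done
 * when an affinity mode other than PERF_AFFINITY_SYS was requested and the
 * system has more than one NUMA node.
 */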
static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_zalloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		set_bit(node_index, node_mask);
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
				data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

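/*
 * Set up asynchronous output for this mmap: allocate the nr_cblocks data
 * buffers, their aiocb control blocks and the array of pointers used to
 * track in-flight write requests, then bind each buffer to the map's NUMA
 * node according to the requested affinity.
 */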
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
Alexey Budankov | 93f20c0 | 2018-11-06 12:07:19 +0300 | [diff] [blame] | 178 | /* |
| 179 | * Use cblock.aio_fildes value different from -1 |
| 180 | * to denote started aio write operation on the |
| 181 | * cblock so it requires explicit record__aio_sync() |
| 182 | * call prior the cblock may be reused again. |
| 183 | */ |
| 184 | map->aio.cblocks[i].aio_fildes = -1; |
| 185 | /* |
| 186 | * Allocate cblocks with priority delta to have |
| 187 | * faster aio write system calls because queued requests |
| 188 | * are kept in separate per-prio queues and adding |
| 189 | * a new request will iterate thru shorter per-prio |
| 190 | * list. Blocks with numbers higher than |
| 191 | * _SC_AIO_PRIO_DELTA_MAX go with priority 0. |
| 192 | */ |
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

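/*
 * Release the resources mmap__mmap() layered on top of the core ring buffer
 * mapping: the affinity mask bitmap, the AIO buffers, the intermediate
 * buffer used for compressed data and the AUX area mmap.
 */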
void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

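/*
 * Set a bit in @mask for every online CPU that belongs to NUMA node @node.
 */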
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int idx, nr_cpus;
	struct perf_cpu cpu;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (idx = 0; idx < nr_cpus; idx++) {
		cpu = perf_cpu_map__cpu(cpu_map, idx); /* map cpu index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu.cpu, mask->bits);
	}
}

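/*
 * Build the affinity mask for this map according to the requested mode: all
 * CPUs of the map's NUMA node for PERF_AFFINITY_NODE, or just the map's own
 * CPU for PERF_AFFINITY_CPU.
 */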
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu().cpu;
	map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu.cpu, map->affinity_mask.bits);

	return 0;
}

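/*
 * Map the perf event ring buffer through libperf, then layer the perf tool
 * specifics on top: the affinity mask, the flush threshold, an intermediate
 * buffer when compression is used without AIO, the AUX area mmap and,
 * finally, the AIO buffers.
 */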
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

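/*
 * Drain the data between 'start' and 'end' of the ring buffer through the
 * push() callback. Because the buffer wraps around, the available data may
 * be split across its end; in that case it is pushed as two chunks: the part
 * up to the end of the buffer first, the remainder from the beginning next.
 */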
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}

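/*
 * Duplicate a CPU mask: allocate a bitmap of the same size for @clone and
 * copy @original's bits into it. Returns 0 on success, -ENOMEM otherwise.
 */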
int mmap_cpu_mask__duplicate(struct mmap_cpu_mask *original, struct mmap_cpu_mask *clone)
{
	clone->nbits = original->nbits;
	clone->bits = bitmap_zalloc(original->nbits);
	if (!clone->bits)
		return -ENOMEM;

	memcpy(clone->bits, original->bits, MMAP_CPU_MASK_BYTES(original));
	return 0;
}