blob: b99213ba11b5f072c54ae3177114fc65960fab09 [file] [log] [blame]
Arnaldo Carvalho de Melo16958492017-10-06 10:31:47 -03001#ifndef __PERF_MMAP_H
2#define __PERF_MMAP_H 1
3
4#include <linux/compiler.h>
5#include <linux/refcount.h>
6#include <linux/types.h>
Daniel Borkmann09d621542018-10-19 15:51:02 +02007#include <linux/ring_buffer.h>
Arnaldo Carvalho de Melo16958492017-10-06 10:31:47 -03008#include <stdbool.h>
Alexey Budankov0b773832018-11-06 12:03:35 +03009#ifdef HAVE_AIO_SUPPORT
10#include <aio.h>
11#endif
Arnaldo Carvalho de Melo16958492017-10-06 10:31:47 -030012#include "auxtrace.h"
13#include "event.h"
14
/* Forward declaration; <aio.h> supplies the full type only under HAVE_AIO_SUPPORT. */
struct aiocb;
/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @base - start of the mmap()'ed ring buffer region
 * @mask - used to wrap offsets into the buffer (presumably size - 1; confirm at mmap site)
 * @fd - file descriptor the region was mapped from
 * @cpu - CPU this map is associated with
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 * @prev - last head position read (see perf_mmap__read_head())
 * @start - current read window start offset
 * @end - current read window end offset
 * @overwrite - true when the kernel may overwrite unread data (backward/overwrite mode)
 * @auxtrace_mmap - companion AUX area tracing mmap
 * @event_copy - staging buffer for events that wrap around the ring buffer edge
 * @aio - POSIX AIO state for asynchronous flushing (only with HAVE_AIO_SUPPORT)
 */
struct perf_mmap {
	void		 *base;
	int		 mask;
	int		 fd;
	int		 cpu;
	refcount_t	 refcnt;
	u64		 prev;
	u64		 start;
	u64		 end;
	bool		 overwrite;
	struct auxtrace_mmap auxtrace_mmap;
	/* 8-byte aligned so a copied event's header/payload stay naturally aligned */
	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
#ifdef HAVE_AIO_SUPPORT
	struct {
		void		 *data;		/* buffer the AIO write drains into */
		struct aiocb	 cblock;	/* control block for the in-flight request */
		int		 nr_cblocks;	/* number of control blocks in use */
	} aio;
#endif
};
41
/*
 * State machine of bkw_mmap_state:
 *
 *  .________________(forbid)_____________.
 *  |                                     V
 *  NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *  ^  ^              |   ^               |
 *  |  |__(forbid)____/   |___(forbid)___/|
 *  |                                     |
 *   \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 *
 * Transitions marked (forbid) are invalid and indicate a logic error.
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};
69
/*
 * Parameters for perf_mmap__mmap():
 * @prot       - mmap protection flags (PROT_*)
 * @mask       - ring buffer size mask (stored into perf_mmap::mask)
 * @nr_cblocks - number of AIO control blocks to set up (AIO builds only — TODO confirm)
 */
struct mmap_params {
	int			    prot, mask, nr_cblocks;
	struct auxtrace_mmap_params auxtrace_mp;
};
74
/* Map/unmap the ring buffer region described by @mp onto @map. */
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
void perf_mmap__munmap(struct perf_mmap *map);

/* Reference counting on perf_mmap::refcnt; put may tear down the map. */
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);

/* Tell the kernel the consumed data may be reused (advances the tail). */
void perf_mmap__consume(struct perf_mmap *map);
Arnaldo Carvalho de Melo16958492017-10-06 10:31:47 -030082
Arnaldo Carvalho de Melo16958492017-10-06 10:31:47 -030083static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
84{
Daniel Borkmann09d621542018-10-19 15:51:02 +020085 return ring_buffer_read_head(mm->base);
Arnaldo Carvalho de Melo16958492017-10-06 10:31:47 -030086}
87
88static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
89{
Daniel Borkmann09d621542018-10-19 15:51:02 +020090 ring_buffer_write_tail(md->base, tail);
Arnaldo Carvalho de Melo16958492017-10-06 10:31:47 -030091}
92
/* Read the next event from a forward (non-overwrite) ring buffer. */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map);

/* Read the next event from the current read window; NULL when exhausted. */
union perf_event *perf_mmap__read_event(struct perf_mmap *map);

/* Drain available data from @md into @to via the @push callback. */
int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(struct perf_mmap *map, void *to, void *buf, size_t size));
#ifdef HAVE_AIO_SUPPORT
/* Asynchronously drain @md via POSIX AIO; @off tracks the output file offset. */
int perf_mmap__aio_push(struct perf_mmap *md, void *to,
			int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
			off_t *off);
#else
/* Stub when built without AIO: report success without pushing anything. */
static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused,
	int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
	off_t *off __maybe_unused)
{
	return 0;
}
#endif

/* Total length of the mmap'ed region (ring buffer size — TODO confirm whether control page is included). */
size_t perf_mmap__mmap_len(struct perf_mmap *map);

/* Begin/end a read pass: set up, then conclude, the start/end read window. */
int perf_mmap__read_init(struct perf_mmap *md);
void perf_mmap__read_done(struct perf_mmap *map);
#endif /* __PERF_MMAP_H */