Thomas Gleixner | 6eda583 | 2009-05-01 18:29:57 +0200 | [diff] [blame] | 1 | #ifndef _PERF_PERF_H |
| 2 | #define _PERF_PERF_H |
| 3 | |
Jiri Olsa | 43599d1 | 2014-05-05 12:53:20 +0200 | [diff] [blame^] | 4 | #include "perf-sys.h" |
Peter Zijlstra | a94d342 | 2013-10-30 11:42:46 +0100 | [diff] [blame] | 5 | |
| 6 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
| 7 | |
Peter Zijlstra | 1a482f3 | 2009-05-23 18:28:58 +0200 | [diff] [blame] | 8 | #include <time.h> |
| 9 | #include <unistd.h> |
| 10 | #include <sys/types.h> |
| 11 | #include <sys/syscall.h> |
| 12 | |
Borislav Petkov | d944c4e | 2014-04-25 21:31:02 +0200 | [diff] [blame] | 13 | #include <linux/types.h> |
David Howells | d2709c7 | 2012-11-19 22:21:03 +0000 | [diff] [blame] | 14 | #include <linux/perf_event.h> |
Peter Zijlstra | 1a482f3 | 2009-05-23 18:28:58 +0200 | [diff] [blame] | 15 | |
Thomas Gleixner | a92e70237 | 2009-05-01 18:39:47 +0200 | [diff] [blame] | 16 | #ifndef NSEC_PER_SEC |
| 17 | # define NSEC_PER_SEC 1000000000ULL |
| 18 | #endif |
David Ahern | 70f7b4a | 2013-08-07 21:56:38 -0400 | [diff] [blame] | 19 | #ifndef NSEC_PER_USEC |
| 20 | # define NSEC_PER_USEC 1000ULL |
| 21 | #endif |
Thomas Gleixner | a92e70237 | 2009-05-01 18:39:47 +0200 | [diff] [blame] | 22 | |
| 23 | static inline unsigned long long rdclock(void) |
| 24 | { |
| 25 | struct timespec ts; |
| 26 | |
| 27 | clock_gettime(CLOCK_MONOTONIC, &ts); |
| 28 | return ts.tv_sec * 1000000000ULL + ts.tv_nsec; |
| 29 | } |
Thomas Gleixner | 6eda583 | 2009-05-01 18:29:57 +0200 | [diff] [blame] | 30 | |
Jiri Olsa | 52502bf | 2012-10-31 15:52:47 +0100 | [diff] [blame] | 31 | extern bool test_attr__enabled; |
| 32 | void test_attr__init(void); |
| 33 | void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, |
| 34 | int fd, int group_fd, unsigned long flags); |
| 35 | |
Thomas Gleixner | 6eda583 | 2009-05-01 18:29:57 +0200 | [diff] [blame] | 36 | static inline int |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 37 | sys_perf_event_open(struct perf_event_attr *attr, |
Thomas Gleixner | 6eda583 | 2009-05-01 18:29:57 +0200 | [diff] [blame] | 38 | pid_t pid, int cpu, int group_fd, |
| 39 | unsigned long flags) |
| 40 | { |
Jiri Olsa | 52502bf | 2012-10-31 15:52:47 +0100 | [diff] [blame] | 41 | int fd; |
| 42 | |
| 43 | fd = syscall(__NR_perf_event_open, attr, pid, cpu, |
| 44 | group_fd, flags); |
| 45 | |
| 46 | if (unlikely(test_attr__enabled)) |
| 47 | test_attr__open(attr, pid, cpu, fd, group_fd, flags); |
| 48 | |
| 49 | return fd; |
Thomas Gleixner | 6eda583 | 2009-05-01 18:29:57 +0200 | [diff] [blame] | 50 | } |
| 51 | |
Ingo Molnar | 85a9f92 | 2009-05-25 09:59:50 +0200 | [diff] [blame] | 52 | #define MAX_NR_CPUS 256 |
Thomas Gleixner | 6eda583 | 2009-05-01 18:29:57 +0200 | [diff] [blame] | 53 | |
Feng Tang | 70cb4e9 | 2012-10-30 11:56:02 +0800 | [diff] [blame] | 54 | extern const char *input_name; |
Arnaldo Carvalho de Melo | 8035458 | 2010-05-17 15:51:10 -0300 | [diff] [blame] | 55 | extern bool perf_host, perf_guest; |
Stephane Eranian | fbe96f2 | 2011-09-30 15:40:40 +0200 | [diff] [blame] | 56 | extern const char perf_version_string[]; |
Zhang, Yanmin | a1645ce | 2010-04-19 13:32:50 +0800 | [diff] [blame] | 57 | |
Arnaldo Carvalho de Melo | 3af6e33 | 2011-10-13 08:52:46 -0300 | [diff] [blame] | 58 | void pthread__unblock_sigwinch(void); |
| 59 | |
Namhyung Kim | 12864b3 | 2012-04-26 14:15:22 +0900 | [diff] [blame] | 60 | #include "util/target.h" |
Namhyung Kim | bea0340 | 2012-04-26 14:15:15 +0900 | [diff] [blame] | 61 | |
/*
 * record_opts - user-selected options controlling an event-recording
 * session (perf record and tools that reuse its event setup).
 *
 * Mostly filled from command-line parsing; consumed when configuring
 * perf_event_attr for the session's events.  Field semantics below are
 * inferred from the names where the code is not visible in this header —
 * NOTE(review): confirm against builtin-record.c / evsel setup.
 */
struct record_opts {
	struct target target;		/* what to profile: pid/tid/cpu/uid selection (util/target.h) */
	int	     call_graph;	/* call-graph collection mode selector */
	bool	     call_graph_enabled; /* call-graph recording requested */
	bool	     group;		/* put events in a single group */
	bool	     inherit_stat;	/* presumably maps to attr.inherit_stat — verify */
	bool	     no_buffering;	/* deliver samples immediately, no buffering */
	bool	     no_inherit;	/* do not count children of the target */
	bool	     no_inherit_set;	/* true if user explicitly set (no_)inherit */
	bool	     no_samples;	/* open events without sampling */
	bool	     raw_samples;	/* record raw sample data */
	bool	     sample_address;	/* sample memory addresses */
	bool	     sample_weight;	/* sample weight (e.g. mem access cost) */
	bool	     sample_time;	/* include timestamps in samples */
	bool	     period;		/* include sample period in samples */
	unsigned int freq;		/* default sampling frequency (Hz) */
	unsigned int mmap_pages;	/* ring-buffer size in pages */
	unsigned int user_freq;		/* user-specified frequency override */
	u64          branch_stack;	/* branch-stack sampling type mask */
	u64          default_interval;	/* default sampling period */
	u64          user_interval;	/* user-specified period override */
	u16	     stack_dump_size;	/* user-stack dump size for DWARF unwinding */
	bool	     sample_transaction; /* sample transaction flags (HLE/RTM) */
	unsigned     initial_delay;	/* delay before enabling events — units not visible here; TODO confirm (ms?) */
};
| 87 | |
Thomas Gleixner | 6eda583 | 2009-05-01 18:29:57 +0200 | [diff] [blame] | 88 | #endif |