blob: a3541f98e1fcb5f97a8bf3a2f99b2d669f7fc32b [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -03002#ifndef __PERF_ENV_H
3#define __PERF_ENV_H
4
Jiri Olsa720e98b2016-02-16 16:01:43 +01005#include <linux/types.h>
Song Liue4378f02019-03-11 22:30:42 -07006#include <linux/rbtree.h>
Ian Rogers6d188042022-01-04 22:13:51 -08007#include "cpumap.h"
Song Liue4378f02019-03-11 22:30:42 -07008#include "rwsem.h"
Jiri Olsa720e98b2016-02-16 16:01:43 +01009
Arnaldo Carvalho de Melo87ffb6c2019-09-10 16:29:02 +010010struct perf_cpu_map;
11
/*
 * Per-CPU topology identifiers (one entry per logical CPU in
 * perf_env::cpu, indexed by CPU number).
 */
struct cpu_topology_map {
	int	socket_id;	/* physical package id */
	int	die_id;		/* die id within the socket */
	int	core_id;	/* core id within the die */
};
17
Jiri Olsa720e98b2016-02-16 16:01:43 +010018struct cpu_cache_level {
19 u32 level;
20 u32 line_size;
21 u32 sets;
22 u32 ways;
23 char *type;
24 char *size;
25 char *map;
26};
27
Jiri Olsac60da222016-07-04 14:16:20 +020028struct numa_node {
29 u32 node;
30 u64 mem_total;
31 u64 mem_free;
Jiri Olsaf8548392019-07-21 13:23:49 +020032 struct perf_cpu_map *map;
Jiri Olsac60da222016-07-04 14:16:20 +020033};
34
Jiri Olsae2091ce2018-03-07 16:50:08 +010035struct memory_node {
36 u64 node;
37 u64 size;
38 unsigned long *set;
39};
40
/* One hybrid PMU (e.g. a core-type PMU) and the CPU list it covers. */
struct hybrid_node {
	char	*pmu_name;	/* PMU name string */
	char	*cpus;		/* CPU list string for this PMU */
};
45
/* Per-hybrid-PMU capability strings ("cpc" = cpu pmu caps). */
struct hybrid_cpc_node {
	int		 nr_cpu_pmu_caps;	/* number of capability entries */
	unsigned int	 max_branches;		/* branch stack depth capability */
	char		*cpu_pmu_caps;		/* concatenated capability strings */
	char		*pmu_name;		/* owning PMU name */
};
52
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030053struct perf_env {
54 char *hostname;
55 char *os_release;
56 char *version;
57 char *arch;
58 int nr_cpus_online;
59 int nr_cpus_avail;
60 char *cpu_desc;
61 char *cpuid;
62 unsigned long long total_mem;
Kan Liange0838e02015-09-10 11:03:05 -030063 unsigned int msr_pmu_type;
Kan Liang6f91ea22020-03-19 13:25:02 -070064 unsigned int max_branches;
Leo Yan7c0223e2021-08-09 19:27:25 +080065 int kernel_is_64_bit;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030066
67 int nr_cmdline;
68 int nr_sibling_cores;
Kan Liangacae8b32019-06-04 15:50:41 -070069 int nr_sibling_dies;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030070 int nr_sibling_threads;
71 int nr_numa_nodes;
Jiri Olsae2091ce2018-03-07 16:50:08 +010072 int nr_memory_nodes;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030073 int nr_pmu_mappings;
74 int nr_groups;
Kan Liang6f91ea22020-03-19 13:25:02 -070075 int nr_cpu_pmu_caps;
Jin Yaof7d74ce2021-05-14 20:29:47 +080076 int nr_hybrid_nodes;
Jin Yaoe1190832021-05-14 20:29:48 +080077 int nr_hybrid_cpc_nodes;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030078 char *cmdline;
79 const char **cmdline_argv;
80 char *sibling_cores;
Kan Liangacae8b32019-06-04 15:50:41 -070081 char *sibling_dies;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030082 char *sibling_threads;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030083 char *pmu_mappings;
Kan Liang6f91ea22020-03-19 13:25:02 -070084 char *cpu_pmu_caps;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -030085 struct cpu_topology_map *cpu;
Jiri Olsa720e98b2016-02-16 16:01:43 +010086 struct cpu_cache_level *caches;
87 int caches_cnt;
Alexey Budankovd3c8c082019-03-18 20:41:02 +030088 u32 comp_ratio;
Alexey Budankov42e1fd82019-03-18 20:41:33 +030089 u32 comp_ver;
90 u32 comp_type;
91 u32 comp_level;
92 u32 comp_mmap_len;
Jiri Olsac60da222016-07-04 14:16:20 +020093 struct numa_node *numa_nodes;
Jiri Olsae2091ce2018-03-07 16:50:08 +010094 struct memory_node *memory_nodes;
95 unsigned long long memory_bsize;
Jin Yaof7d74ce2021-05-14 20:29:47 +080096 struct hybrid_node *hybrid_nodes;
Jin Yaoe1190832021-05-14 20:29:48 +080097 struct hybrid_cpc_node *hybrid_cpc_nodes;
Arnaldo Carvalho de Meloef0580e2020-10-20 15:57:21 -030098#ifdef HAVE_LIBBPF_SUPPORT
Song Liue4378f02019-03-11 22:30:42 -070099 /*
100 * bpf_info_lock protects bpf rbtrees. This is needed because the
101 * trees are accessed by different threads in perf-top
102 */
103 struct {
104 struct rw_semaphore lock;
105 struct rb_root infos;
106 u32 infos_cnt;
Song Liu3792cb22019-03-11 22:30:44 -0700107 struct rb_root btfs;
108 u32 btfs_cnt;
Song Liue4378f02019-03-11 22:30:42 -0700109 } bpf_progs;
Arnaldo Carvalho de Meloef0580e2020-10-20 15:57:21 -0300110#endif // HAVE_LIBBPF_SUPPORT
Namhyung Kimd1277aa2020-03-25 21:45:31 +0900111 /* same reason as above (for perf-top) */
112 struct {
113 struct rw_semaphore lock;
114 struct rb_root tree;
115 } cgroups;
116
Jiri Olsa389799a2019-08-29 13:31:48 +0200117 /* For fast cpu to numa node lookup via perf_env__numa_node */
118 int *numa_map;
119 int nr_numa_map;
Jiri Olsad1e325c2020-08-05 11:34:40 +0200120
121 /* For real clock time reference. */
122 struct {
123 u64 tod_ns;
124 u64 clockid_ns;
Jiri Olsa9d88a1a12020-08-05 11:34:41 +0200125 u64 clockid_res_ns;
Jiri Olsad1e325c2020-08-05 11:34:40 +0200126 int clockid;
127 /*
128 * enabled is valid for report mode, and is true if above
129 * values are set, it's set in process_clock_data
130 */
131 bool enabled;
132 } clock;
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -0300133};
134
/*
 * Compression format identifiers (recorded in perf_env::comp_type).
 * PERF_COMP_MAX is a sentinel counting the valid entries.
 */
enum perf_compress_type {
	PERF_COMP_NONE = 0,	/* data is uncompressed */
	PERF_COMP_ZSTD,		/* Zstandard-compressed records */
	PERF_COMP_MAX
};
140
Song Liue4378f02019-03-11 22:30:42 -0700141struct bpf_prog_info_node;
Song Liu3792cb22019-03-11 22:30:44 -0700142struct btf_node;
Song Liue4378f02019-03-11 22:30:42 -0700143
Arnaldo Carvalho de Melob6998692015-09-08 16:58:20 -0300144extern struct perf_env perf_env;
145
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -0300146void perf_env__exit(struct perf_env *env);
147
Leo Yan7c0223e2021-08-09 19:27:25 +0800148int perf_env__kernel_is_64_bit(struct perf_env *env);
149
Arnaldo Carvalho de Melob6998692015-09-08 16:58:20 -0300150int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
151
Arnaldo Carvalho de Melof1cedfb2019-09-30 11:50:15 -0300152int perf_env__read_cpuid(struct perf_env *env);
Kim Phillips9fe88952021-08-17 17:15:07 -0500153int perf_env__read_pmu_mappings(struct perf_env *env);
154int perf_env__nr_pmu_mappings(struct perf_env *env);
155const char *perf_env__pmu_mappings(struct perf_env *env);
156
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300157int perf_env__read_cpu_topology_map(struct perf_env *env);
158
Jiri Olsa720e98b2016-02-16 16:01:43 +0100159void cpu_cache_level__free(struct cpu_cache_level *cache);
Arnaldo Carvalho de Melo4e8fbc12017-12-11 14:47:49 -0300160
161const char *perf_env__arch(struct perf_env *env);
Kim Phillips9fe88952021-08-17 17:15:07 -0500162const char *perf_env__cpuid(struct perf_env *env);
Adrian Hunterdbbd34a2018-05-17 12:21:53 +0300163const char *perf_env__raw_arch(struct perf_env *env);
Adrian Hunter9cecca32018-05-22 13:54:32 +0300164int perf_env__nr_cpus_avail(struct perf_env *env);
Adrian Hunterdbbd34a2018-05-17 12:21:53 +0300165
Song Liue4378f02019-03-11 22:30:42 -0700166void perf_env__init(struct perf_env *env);
167void perf_env__insert_bpf_prog_info(struct perf_env *env,
168 struct bpf_prog_info_node *info_node);
169struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
170 __u32 prog_id);
Ian Rogers4924b1f2021-11-11 23:45:25 -0800171bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
Song Liu3792cb22019-03-11 22:30:44 -0700172struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
Jiri Olsa389799a2019-08-29 13:31:48 +0200173
Ian Rogers6d188042022-01-04 22:13:51 -0800174int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
Arnaldo Carvalho de Melof0ce8882015-09-08 13:30:00 -0300175#endif /* __PERF_ENV_H */