/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
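
/*
 * Illustrative expansion (a sketch, not actual preprocessor output):
 * assuming sd_flags.h contains entries such as SD_FLAG(SD_NUMA) and
 * SD_FLAG(SD_SHARE_PKG_RESOURCES), the two passes above generate roughly:
 *
 *	enum { __SD_NUMA, __SD_SHARE_PKG_RESOURCES, __SD_FLAG_CNT, };
 *	enum {
 *		SD_NUMA			= 1 << __SD_NUMA,
 *		SD_SHARE_PKG_RESOURCES	= 1 << __SD_SHARE_PKG_RESOURCES,
 *	};
 *
 * so each flag gets a unique power-of-two bit, and __SD_FLAG_CNT counts
 * how many flags exist.
 */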

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
};
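
/*
 * Usage sketch (an assumption, modelled on the wakeup path in
 * kernel/sched/fair.c): the scheduler consults the shared state of the
 * last-level-cache domain to decide whether scanning for an idle core is
 * worthwhile, e.g.:
 *
 *	struct sched_domain_shared *sds;
 *
 *	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
 *	if (sds && READ_ONCE(sds->has_idle_cores))
 *		;	// worth scanning the LLC for an idle core
 *
 * sd_llc_shared is private to kernel/sched/, so this illustrates
 * in-scheduler use rather than a general-purpose API.
 */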

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;		/* the balancing groups of the domain */
	unsigned long min_interval;		/* Minimum balance interval ms */
	unsigned long max_interval;		/* Maximum balance interval ms */
	unsigned int busy_factor;		/* less balancing by factor if busy */
	unsigned int imbalance_pct;		/* No balance until over watermark */
	unsigned int cache_nice_tries;		/* Leave cache hot tasks for # tries */

	int nohz_idle;				/* NOHZ IDLE status */
	int flags;				/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;		/* init to jiffies. units in jiffies */
	unsigned int balance_interval;		/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed;		/* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;			/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
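
/*
 * Usage sketch (illustrative only): under rcu_read_lock(), a caller can
 * walk a CPU's domain hierarchy upwards via ->parent and inspect each
 * level's span, e.g.:
 *
 *	struct sched_domain *sd;
 *	int cpu2;
 *
 *	for (sd = rcu_dereference(cpu_rq(cpu)->sd); sd; sd = sd->parent)
 *		for_each_cpu(cpu2, sched_domain_span(sd))
 *			;	// cpu2 belongs to this domain's span
 *
 * cpu_rq() is private to kernel/sched/, so this is a sketch of
 * in-scheduler use, not a general-purpose API.
 */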

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
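
/*
 * Usage sketch (an assumption, modelled on the cpuset code): repartition
 * the system into a single domain spanning all active CPUs, with default
 * attributes:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		partition_sched_domains(1, doms, NULL);
 *	}
 *
 * partition_sched_domains() keeps the array it is handed as the current
 * partition, so the caller must not free it afterwards.
 */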

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif
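
/*
 * Usage sketch (modelled on default_topology in kernel/sched/topology.c;
 * treat the details as an assumption): an architecture can install its own
 * topology table, terminated by an entry with a NULL mask, e.g.:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * Each level's mask() returns the CPUs sharing that level with a given
 * CPU, and sd_flags() (cpu_smt_flags() etc. above) supplies the SD_*
 * flags for the resulting sched_domain.
 */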

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
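
/*
 * Worked example (hypothetical numbers): on an asymmetric system where a
 * little CPU's peak performance is half that of the fastest CPU, an arch
 * override would return
 *
 *	(1/2) * SCHED_CAPACITY_SCALE = 512
 *
 * for the little CPU and 1024 (SCHED_CAPACITY_SCALE) for the fastest one.
 * The generic fallback above simply reports full capacity for every CPU.
 */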

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_set_thermal_pressure
static __always_inline
void arch_set_thermal_pressure(const struct cpumask *cpus,
			       unsigned long th_pressure)
{ }
#endif
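
/*
 * Sketch of an arch override (an assumption, loosely following the arm
 * topology code): when thermal throttling caps a CPU's frequency, the
 * platform reports the lost capacity so the scheduler can discount it:
 *
 *	capped = arch_scale_cpu_capacity(cpu) * capped_freq / max_freq;
 *	arch_set_thermal_pressure(cpumask_of(cpu),
 *				  arch_scale_cpu_capacity(cpu) - capped);
 *
 * arch_scale_thermal_pressure() then hands that value back to the
 * scheduler; the generic stubs above report no thermal pressure at all.
 */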

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */