Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Written by: Matthew Dobson, IBM Corporation |
| 3 | * |
| 4 | * Copyright (C) 2002, IBM Corp. |
| 5 | * |
| 6 | * All rights reserved. |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by |
| 10 | * the Free Software Foundation; either version 2 of the License, or |
| 11 | * (at your option) any later version. |
| 12 | * |
| 13 | * This program is distributed in the hope that it will be useful, but |
| 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
| 16 | * NON INFRINGEMENT. See the GNU General Public License for more |
| 17 | * details. |
| 18 | * |
| 19 | * You should have received a copy of the GNU General Public License |
| 20 | * along with this program; if not, write to the Free Software |
| 21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 22 | * |
| 23 | * Send feedback to <colpatch@us.ibm.com> |
| 24 | */ |
H. Peter Anvin | 1965aae | 2008-10-22 22:26:29 -0700 | [diff] [blame] | 25 | #ifndef _ASM_X86_TOPOLOGY_H |
| 26 | #define _ASM_X86_TOPOLOGY_H |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 27 | |
/*
 * NUMA_NO_NODE was moved from here to <linux/numa.h> so that its
 * definition stays visible independent of CONFIG_NUMA.
 */
| 33 | #include <linux/numa.h> |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 34 | |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 35 | #ifdef CONFIG_NUMA |
| 36 | #include <linux/cpumask.h> |
Lee Schermerhorn | 4e25b25 | 2009-12-14 17:58:23 -0800 | [diff] [blame] | 37 | |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 38 | #include <asm/mpspec.h> |
Paul Gortmaker | 186f436 | 2016-07-13 20:18:56 -0400 | [diff] [blame] | 39 | #include <asm/percpu.h> |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 40 | |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 41 | /* Mappings between logical cpu number and node number */ |
| 42 | DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 43 | |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 44 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
Lee Schermerhorn | e534c7c | 2010-05-26 14:44:58 -0700 | [diff] [blame] | 45 | /* |
| 46 | * override generic percpu implementation of cpu_to_node |
| 47 | */ |
| 48 | extern int __cpu_to_node(int cpu); |
| 49 | #define cpu_to_node __cpu_to_node |
| 50 | |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 51 | extern int early_cpu_to_node(int cpu); |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 52 | |
| 53 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
| 54 | |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 55 | /* Same function but used if called before per_cpu areas are setup */ |
| 56 | static inline int early_cpu_to_node(int cpu) |
| 57 | { |
Tejun Heo | f10fcd47 | 2009-01-13 20:41:34 +0900 | [diff] [blame] | 58 | return early_per_cpu(x86_cpu_to_node_map, cpu); |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 59 | } |
Mike Travis | aa6b544 | 2008-03-31 08:41:55 -0700 | [diff] [blame] | 60 | |
Rusty Russell | 71ee73e | 2009-03-13 14:49:52 +1030 | [diff] [blame] | 61 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 62 | |
Rusty Russell | 71ee73e | 2009-03-13 14:49:52 +1030 | [diff] [blame] | 63 | /* Mappings between node number and cpus on that node. */ |
Rusty Russell | c032ef60 | 2009-03-13 14:49:53 +1030 | [diff] [blame] | 64 | extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
Rusty Russell | 71ee73e | 2009-03-13 14:49:52 +1030 | [diff] [blame] | 65 | |
| 66 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
Rusty Russell | 73e907d | 2009-03-13 14:49:57 +1030 | [diff] [blame] | 67 | extern const struct cpumask *cpumask_of_node(int node); |
Rusty Russell | 71ee73e | 2009-03-13 14:49:52 +1030 | [diff] [blame] | 68 | #else |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 69 | /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ |
Rusty Russell | c032ef60 | 2009-03-13 14:49:53 +1030 | [diff] [blame] | 70 | static inline const struct cpumask *cpumask_of_node(int node) |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 71 | { |
| 72 | return node_to_cpumask_map[node]; |
| 73 | } |
Rusty Russell | 71ee73e | 2009-03-13 14:49:52 +1030 | [diff] [blame] | 74 | #endif |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 75 | |
Brian Gerst | 6470aff | 2009-01-27 12:56:47 +0900 | [diff] [blame] | 76 | extern void setup_node_to_cpumask_map(void); |
| 77 | |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 78 | #define pcibus_to_node(bus) __pcibus_to_node(bus) |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 79 | |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 80 | extern int __node_distance(int, int); |
| 81 | #define node_distance(a, b) __node_distance(a, b) |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 82 | |
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 83 | #else /* !CONFIG_NUMA */ |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 84 | |
/* Without CONFIG_NUMA there is exactly one node: node 0. */
static inline int numa_node_id(void)
{
	/* Single-node system. */
	return 0;
}
Lee Schermerhorn | 7281201 | 2010-05-26 14:44:56 -0700 | [diff] [blame] | 89 | /* |
| 90 | * indicate override: |
| 91 | */ |
| 92 | #define numa_node_id numa_node_id |
Mike Travis | f2a0827 | 2009-01-15 09:19:32 -0800 | [diff] [blame] | 93 | |
/* !CONFIG_NUMA: every CPU lives on node 0, even before per-cpu setup. */
static inline int early_cpu_to_node(int cpu)
{
	/* 'cpu' is irrelevant on a single-node system. */
	return 0;
}
Mike Travis | 23ca4bb | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 98 | |
Brian Gerst | 6470aff | 2009-01-27 12:56:47 +0900 | [diff] [blame] | 99 | static inline void setup_node_to_cpumask_map(void) { } |
| 100 | |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 101 | #endif |
| 102 | |
Mike Travis | aa6b544 | 2008-03-31 08:41:55 -0700 | [diff] [blame] | 103 | #include <asm-generic/topology.h> |
| 104 | |
Rusty Russell | 030bb20 | 2008-12-26 22:23:41 +1030 | [diff] [blame] | 105 | extern const struct cpumask *cpu_coregroup_mask(int cpu); |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 106 | |
Thomas Gleixner | 1f12e32 | 2016-02-22 22:19:15 +0000 | [diff] [blame] | 107 | #define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id) |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 108 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) |
Len Brown | 212bf4f | 2019-05-13 13:58:49 -0400 | [diff] [blame] | 109 | #define topology_logical_die_id(cpu) (cpu_data(cpu).logical_die_id) |
Len Brown | 306a0de | 2019-05-13 13:58:48 -0400 | [diff] [blame] | 110 | #define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id) |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 111 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) |
Artem Fetishev | 825600c | 2014-03-28 13:33:39 -0700 | [diff] [blame] | 112 | |
Borislav Petkov | 1eb8f69 | 2021-01-14 10:36:59 +0100 | [diff] [blame] | 113 | extern unsigned int __max_die_per_package; |
| 114 | |
Sudeep Holla | 3282e6b | 2016-05-04 17:50:59 +0100 | [diff] [blame] | 115 | #ifdef CONFIG_SMP |
Len Brown | 2e4c54d | 2019-05-13 13:58:56 -0400 | [diff] [blame] | 116 | #define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu)) |
Rusty Russell | 7ad728f | 2009-03-13 14:49:50 +1030 | [diff] [blame] | 117 | #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) |
Bartosz Golaszewski | 06931e6 | 2015-05-26 15:11:28 +0200 | [diff] [blame] | 118 | #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) |
Thomas Gleixner | 1f12e32 | 2016-02-22 22:19:15 +0000 | [diff] [blame] | 119 | |
| 120 | extern unsigned int __max_logical_packages; |
| 121 | #define topology_max_packages() (__max_logical_packages) |
Andi Kleen | 70b8301 | 2016-05-19 17:09:55 -0700 | [diff] [blame] | 122 | |
Len Brown | 14d96d6 | 2019-05-13 13:58:46 -0400 | [diff] [blame] | 123 | static inline int topology_max_die_per_package(void) |
| 124 | { |
| 125 | return __max_die_per_package; |
| 126 | } |
| 127 | |
Andi Kleen | 70b8301 | 2016-05-19 17:09:55 -0700 | [diff] [blame] | 128 | extern int __max_smt_threads; |
| 129 | |
| 130 | static inline int topology_max_smt_threads(void) |
| 131 | { |
| 132 | return __max_smt_threads; |
| 133 | } |
| 134 | |
Thomas Gleixner | 1f12e32 | 2016-02-22 22:19:15 +0000 | [diff] [blame] | 135 | int topology_update_package_map(unsigned int apicid, unsigned int cpu); |
Len Brown | 212bf4f | 2019-05-13 13:58:49 -0400 | [diff] [blame] | 136 | int topology_update_die_map(unsigned int dieid, unsigned int cpu); |
Thomas Gleixner | 6a4d265 | 2018-05-29 17:50:22 +0200 | [diff] [blame] | 137 | int topology_phys_to_logical_pkg(unsigned int pkg); |
Len Brown | 212bf4f | 2019-05-13 13:58:49 -0400 | [diff] [blame] | 138 | int topology_phys_to_logical_die(unsigned int die, unsigned int cpu); |
Thomas Gleixner | 6a4d265 | 2018-05-29 17:50:22 +0200 | [diff] [blame] | 139 | bool topology_is_primary_thread(unsigned int cpu); |
Thomas Gleixner | f048c39 | 2018-06-21 10:37:20 +0200 | [diff] [blame] | 140 | bool topology_smt_supported(void); |
Thomas Gleixner | 1f12e32 | 2016-02-22 22:19:15 +0000 | [diff] [blame] | 141 | #else |
| 142 | #define topology_max_packages() (1) |
/* !SMP: no package map to maintain; report success. */
static inline int topology_update_package_map(unsigned int apicid, unsigned int cpu)
{
	return 0;
}
/* !SMP: no die map to maintain; report success. */
static inline int topology_update_die_map(unsigned int dieid, unsigned int cpu)
{
	return 0;
}
Thomas Gleixner | 1f12e32 | 2016-02-22 22:19:15 +0000 | [diff] [blame] | 147 | static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; } |
/* !SMP: only one die exists, so the logical die id is always 0. */
static inline int topology_phys_to_logical_die(unsigned int die,
					       unsigned int cpu)
{
	return 0;
}
| 150 | static inline int topology_max_die_per_package(void) { return 1; } |
Andi Kleen | 70b8301 | 2016-05-19 17:09:55 -0700 | [diff] [blame] | 151 | static inline int topology_max_smt_threads(void) { return 1; } |
Thomas Gleixner | 6a4d265 | 2018-05-29 17:50:22 +0200 | [diff] [blame] | 152 | static inline bool topology_is_primary_thread(unsigned int cpu) { return true; } |
Thomas Gleixner | f048c39 | 2018-06-21 10:37:20 +0200 | [diff] [blame] | 153 | static inline bool topology_smt_supported(void) { return false; } |
Thomas Gleixner | 3367e56 | 2008-01-30 13:30:38 +0100 | [diff] [blame] | 154 | #endif |
| 155 | |
Alex Chiang | fe086a7 | 2008-04-29 15:05:29 -0700 | [diff] [blame] | 156 | static inline void arch_fix_phys_package_id(int num, u32 slot) |
| 157 | { |
| 158 | } |
| 159 | |
Yinghai Lu | 30a18d6 | 2008-02-19 03:21:20 -0800 | [diff] [blame] | 160 | struct pci_bus; |
Bjorn Helgaas | afcf21c2 | 2014-01-24 11:54:36 -0700 | [diff] [blame] | 161 | int x86_pci_root_bus_node(int bus); |
Bjorn Helgaas | 2cd6975 | 2011-10-28 16:28:14 -0600 | [diff] [blame] | 162 | void x86_pci_root_bus_resources(int bus, struct list_head *resources); |
Yinghai Lu | 30a18d6 | 2008-02-19 03:21:20 -0800 | [diff] [blame] | 163 | |
Tim Chen | 7d25127 | 2016-11-22 12:23:54 -0800 | [diff] [blame] | 164 | extern bool x86_topology_update; |
Tim Chen | 5e76b2a | 2016-11-22 12:23:55 -0800 | [diff] [blame] | 165 | |
Tim Chen | de966cf | 2016-11-29 10:43:27 -0800 | [diff] [blame] | 166 | #ifdef CONFIG_SCHED_MC_PRIO |
Tim Chen | 5e76b2a | 2016-11-22 12:23:55 -0800 | [diff] [blame] | 167 | #include <asm/percpu.h> |
| 168 | |
| 169 | DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority); |
Tim Chen | f9793e34 | 2016-11-22 12:23:56 -0800 | [diff] [blame] | 170 | extern unsigned int __read_mostly sysctl_sched_itmt_enabled; |
Tim Chen | 5e76b2a | 2016-11-22 12:23:55 -0800 | [diff] [blame] | 171 | |
| 172 | /* Interface to set priority of a cpu */ |
| 173 | void sched_set_itmt_core_prio(int prio, int core_cpu); |
| 174 | |
| 175 | /* Interface to notify scheduler that system supports ITMT */ |
Tim Chen | f9793e34 | 2016-11-22 12:23:56 -0800 | [diff] [blame] | 176 | int sched_set_itmt_support(void); |
Tim Chen | 5e76b2a | 2016-11-22 12:23:55 -0800 | [diff] [blame] | 177 | |
| 178 | /* Interface to notify scheduler that system revokes ITMT support */ |
| 179 | void sched_clear_itmt_support(void); |
| 180 | |
Tim Chen | de966cf | 2016-11-29 10:43:27 -0800 | [diff] [blame] | 181 | #else /* CONFIG_SCHED_MC_PRIO */ |
Tim Chen | 5e76b2a | 2016-11-22 12:23:55 -0800 | [diff] [blame] | 182 | |
Tim Chen | f9793e34 | 2016-11-22 12:23:56 -0800 | [diff] [blame] | 183 | #define sysctl_sched_itmt_enabled 0 |
/* ITMT disabled: setting a core priority has no effect. */
static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
{
}
/* ITMT disabled: nothing to enable; report success. */
static inline int sched_set_itmt_support(void)
{
	return 0;
}
/* ITMT disabled: nothing to revoke. */
static inline void sched_clear_itmt_support(void)
{
}
Tim Chen | de966cf | 2016-11-29 10:43:27 -0800 | [diff] [blame] | 194 | #endif /* CONFIG_SCHED_MC_PRIO */ |
Tim Chen | 5e76b2a | 2016-11-22 12:23:55 -0800 | [diff] [blame] | 195 | |
Giovanni Gherdovich | e2b0d61 | 2020-05-31 20:24:51 +0200 | [diff] [blame] | 196 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_64) |
Giovanni Gherdovich | 1567c3e3 | 2020-01-22 16:16:12 +0100 | [diff] [blame] | 197 | #include <asm/cpufeature.h> |
| 198 | |
| 199 | DECLARE_STATIC_KEY_FALSE(arch_scale_freq_key); |
| 200 | |
| 201 | #define arch_scale_freq_invariant() static_branch_likely(&arch_scale_freq_key) |
| 202 | |
| 203 | DECLARE_PER_CPU(unsigned long, arch_freq_scale); |
| 204 | |
| 205 | static inline long arch_scale_freq_capacity(int cpu) |
| 206 | { |
| 207 | return per_cpu(arch_freq_scale, cpu); |
| 208 | } |
| 209 | #define arch_scale_freq_capacity arch_scale_freq_capacity |
| 210 | |
| 211 | extern void arch_scale_freq_tick(void); |
| 212 | #define arch_scale_freq_tick arch_scale_freq_tick |
| 213 | |
Giovanni Gherdovich | 918229c | 2020-01-22 16:16:17 +0100 | [diff] [blame] | 214 | extern void arch_set_max_freq_ratio(bool turbo_disabled); |
| 215 | #else |
/* Stub for builds without SMP/x86-64 frequency-invariance support. */
static inline void arch_set_max_freq_ratio(bool turbo_disabled)
{
}
Giovanni Gherdovich | 1567c3e3 | 2020-01-22 16:16:12 +0100 | [diff] [blame] | 219 | #endif |
| 220 | |
Nathan Fontenot | 41ea667 | 2020-11-12 19:26:12 +0100 | [diff] [blame] | 221 | #ifdef CONFIG_ACPI_CPPC_LIB |
| 222 | void init_freq_invariance_cppc(void); |
| 223 | #define init_freq_invariance_cppc init_freq_invariance_cppc |
| 224 | #endif |
| 225 | |
H. Peter Anvin | 1965aae | 2008-10-22 22:26:29 -0700 | [diff] [blame] | 226 | #endif /* _ASM_X86_TOPOLOGY_H */ |