/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt.h>

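/*
 * MBA bandwidth is expressed to the user in percent of maximum bandwidth.
 * MBA_IS_LINEAR is the CPUID.0x10.3:ECX flag tested in rdt_get_mem_config()
 * below to tell whether the hardware delay scale is linear.
 */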
#define MAX_MBA_BW	100u
#define MBA_IS_LINEAR	0x4

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
struct rdt_resource rdt_resources_all[] = {
	{
		.name		= "L3",
		.domains	= domain_init(RDT_RESOURCE_L3),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
	},
	{
		.name		= "L3DATA",
		.domains	= domain_init(RDT_RESOURCE_L3DATA),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 0,
		},
	},
	{
		.name		= "L3CODE",
		.domains	= domain_init(RDT_RESOURCE_L3CODE),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 1,
		},
	},
	{
		.name		= "L2",
		.domains	= domain_init(RDT_RESOURCE_L2),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
	},
	{
		.name		= "MB",
		.domains	= domain_init(RDT_RESOURCE_MBA),
		.msr_base	= IA32_MBA_THRTL_BASE,
		.msr_update	= mba_wrmsr,
		.cache_level	= 3,
	},
};

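/*
 * cbm_idx() maps a CLOSID to the index of its CBM MSR. For plain CAT the
 * mapping is the identity (mult 1, offset 0). With CDP the data and code
 * masks of one CLOSID occupy adjacent MSRs, which is why L3DATA and L3CODE
 * above use a multiplier of 2 with offsets 0 and 1 respectively.
 */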
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline bool cache_alloc_hsw_probe(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
		struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
		u32 l, h, max_cbm = BIT_MASK(20) - 1;

		if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
			return false;
		rdmsr(IA32_L3_CBM_BASE, l, h);

		/* If all the bits were set in MSR, return success */
		if (l != max_cbm)
			return false;

		r->num_closid = 4;
		r->default_ctrl = max_cbm;
		r->cache.cbm_len = 20;
		r->cache.min_cbm_bits = 2;
		r->capable = true;
		r->enabled = true;

		return true;
	}

	return false;
}

/*
 * rdt_get_mb_table() - get a mapping between the bandwidth (b/w)
 * percentage values exposed via the user interface and the delay
 * values understood by the hardware.
 *
 * The non-linear delay values have a granularity of powers of two
 * and the hardware does not guarantee a curve of configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre-calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now that support a non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

static bool rdt_get_mem_config(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
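		/*
		 * Illustrative example (not from any particular SKU): a
		 * max_delay of 90 makes min_bw = bw_gran = 10, i.e. the
		 * user can request 10% to 100% of bandwidth in 10% steps.
		 */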
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}
	r->data_width = 3;

	r->capable = true;
	r->enabled = true;

	return true;
}

static void rdt_get_cache_config(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->data_width = (r->cache.cbm_len + 3) / 4;
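	/*
	 * Example: a 20 bit CBM yields default_ctrl = 0xfffff and a data
	 * width of five hex digits in the schemata file.
	 */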
	r->capable = true;
	r->enabled = true;
}

static void rdt_get_cdp_l3_config(int type)
{
	struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l3->num_closid / 2;
	r->cache.cbm_len = r_l3->cache.cbm_len;
	r->default_ctrl = r_l3->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled with the "cdp"
	 * mount option when the resctrl filesystem is mounted.
	 */
	r->enabled = false;
}

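/*
 * A domain is keyed by the id of the cache instance that its CPUs share,
 * as enumerated by the generic cacheinfo code at the resource's cache level.
 */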
static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non-linear delay values.
 */
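/*
 * For the linear scale the mapping is a simple inversion: e.g. a request
 * for 30% of bandwidth (an illustrative value) is programmed as a delay
 * value of 70.
 */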
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}

void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
			r->msr_update(d, m, r);
			return;
		}
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, return NULL and,
 * if requested by the caller, set *pos to the list position where a domain
 * with this id should be inserted. The domain list is sorted by id in
 * ascending order.
 */
static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
					  struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(id);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct msr_param m;
	u32 *dc;
	int i;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	d->ctrl_val = dc;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100
	 */
	for (i = 0; i < r->num_closid; i++, dc++)
		*dc = r->default_ctrl;

	m.low = 0;
	m.high = r->num_closid;
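	/* Push the default control values to the hardware for this domain. */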
	r->msr_update(d, &m, r);
	return 0;
}

/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu to the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;

	if (domain_setup_ctrlval(r, d)) {
		kfree(d);
		return;
	}

	cpumask_set_cpu(cpu, &d->cpu_mask);
	list_add_tail(&d->list, add_pos);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		kfree(d->ctrl_val);
		list_del(&d->list);
		kfree(d);
	}
}

static void clear_closid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	per_cpu(cpu_closid, cpu) = 0;
	state->closid = 0;
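	/* PQR_ASSOC carries the RMID in its low half, the CLOSID on top. */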
	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
}

static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in the default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
			break;
	}
	clear_closid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_enabled_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

static __init bool get_rdt_resources(void)
{
	bool ret = false;

	if (cache_alloc_hsw_probe())
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
			rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
			rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
		}
		ret = true;
	}
	if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
		rdt_get_cache_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		ret = true;
	}

	if (boot_cpu_has(X86_FEATURE_MBA)) {
		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
			ret = true;
	}

	return ret;
}

static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}

	for_each_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);