/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"

#define MAX_MBA_BW	100u
#define MBA_IS_LINEAR	0x4

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)

struct rdt_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.rid		= RDT_RESOURCE_L3,
		.name		= "L3",
		.domains	= domain_init(RDT_RESOURCE_L3),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3DATA] =
	{
		.rid		= RDT_RESOURCE_L3DATA,
		.name		= "L3DATA",
		.domains	= domain_init(RDT_RESOURCE_L3DATA),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3CODE] =
	{
		.rid		= RDT_RESOURCE_L3CODE,
		.name		= "L3CODE",
		.domains	= domain_init(RDT_RESOURCE_L3CODE),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 3,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 1,
		},
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2] =
	{
		.rid		= RDT_RESOURCE_L2,
		.name		= "L2",
		.domains	= domain_init(RDT_RESOURCE_L2),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 1,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2DATA] =
	{
		.rid		= RDT_RESOURCE_L2DATA,
		.name		= "L2DATA",
		.domains	= domain_init(RDT_RESOURCE_L2DATA),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 0,
		},
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2CODE] =
	{
		.rid		= RDT_RESOURCE_L2CODE,
		.name		= "L2CODE",
		.domains	= domain_init(RDT_RESOURCE_L2CODE),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.cache_level	= 2,
		.cache = {
			.min_cbm_bits	= 1,
			.cbm_idx_mult	= 2,
			.cbm_idx_offset	= 1,
		},
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_MBA] =
	{
		.rid		= RDT_RESOURCE_MBA,
		.name		= "MB",
		.domains	= domain_init(RDT_RESOURCE_MBA),
		.msr_base	= IA32_MBA_THRTL_BASE,
		.msr_update	= mba_wrmsr,
		.cache_level	= 3,
		.parse_ctrlval	= parse_bw,
		.format_str	= "%d=%*d",
		.fflags		= RFTYPE_RES_MB,
	},
};
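
/*
 * Illustration (editorial, not part of the original sources): format_str
 * above is the per-domain format used in the resctrl schemata file. On a
 * hypothetical two-socket system with 20-bit L3 CBMs and MBA enabled, the
 * default schemata would look roughly like:
 *
 *	L3:0=fffff;1=fffff
 *	MB:0=100;1=100
 *
 * i.e. "<domain id>=<ctrl value>" entries joined by ';' for each resource.
 */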

static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}
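
/*
 * Worked example (illustrative, not from the original file): with CDP the
 * data and code masks of one CLOSID live in adjacent MSRs. For L3DATA
 * (cbm_idx_mult = 2, cbm_idx_offset = 0) and L3CODE (mult = 2, offset = 1),
 * CLOSID 3 maps to IA32_L3_CBM_BASE + 6 and IA32_L3_CBM_BASE + 7
 * respectively, while plain L3 CAT (mult = 1, offset = 0) maps CLOSID 3 to
 * IA32_L3_CBM_BASE + 3.
 */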

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
		return;
	rdmsr(IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	r->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;
	r->alloc_enabled = true;

	rdt_alloc_capable = true;
}

/*
 * rdt_get_mb_table() - get a mapping between the bandwidth (b/w) percentage
 * values exposed to the user interface and the h/w understandable delay
 * values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a pre-calibrated mapping so the user can express the
 * memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

static bool rdt_get_mem_config(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}
	r->data_width = 3;

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}
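
/*
 * Example decode (illustrative numbers, not taken from the file): on a part
 * where CPUID.0x10.3:EAX reports a raw max_delay of 89, max_delay becomes
 * 90, so min_bw = bw_gran = 100 - 90 = 10, i.e. valid MB values in the
 * schemata run from 10% to 100% in steps of 10. data_width of 3 leaves room
 * to print "100".
 */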

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	r->alloc_enabled = true;
}
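
/*
 * Example decode (illustrative, not from the original file): an L3 CAT
 * implementation reporting cbm_len = 19 and cos_max = 15 in CPUID leaf 0x10
 * yields cbm_len = 20, num_closid = 16, default_ctrl = 0xfffff and
 * data_width = 5, i.e. five hex digits per CBM in the schemata file.
 */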

static void rdt_get_cdp_config(int level, int type)
{
	struct rdt_resource *r_l = &rdt_resources_all[level];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l->num_closid / 2;
	r->cache.cbm_len = r_l->cache.cbm_len;
	r->default_ctrl = r_l->default_ctrl;
	r->cache.shareable_bits = r_l->cache.shareable_bits;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->alloc_enabled = false;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}
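
/*
 * Usage sketch (assumed invocation, matching the "cdp" mount parameter
 * mentioned above): CDP is turned on per cache level when resctrl is
 * mounted, e.g.
 *
 *	mount -t resctrl resctrl -o cdp /sys/fs/resctrl      (L3 CDP)
 *	mount -t resctrl resctrl -o cdpl2 /sys/fs/resctrl    (L2 CDP)
 *
 * num_closid is halved above because each CLOSID then consumes a data/code
 * pair of CBM MSRs (see cbm_idx()).
 */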

static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}
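
/*
 * Note (editorial): the returned id identifies one instance of the cache at
 * the given level, so an rdt_domain normally spans all CPUs that share that
 * cache, e.g. one domain per socket for L3 resources.
 */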

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}
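
/*
 * Worked example (illustrative): with linear delay, the MSR value is the
 * throttling percentage, i.e. a requested bandwidth of 60% is written as
 * 100 - 60 = 40 into the corresponding IA32_MBA_THRTL MSR.
 */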

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		r->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(id);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct msr_param m;
	u32 *dc;
	int i;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	d->ctrl_val = dc;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100
	 */
	for (i = 0; i < r->num_closid; i++, dc++)
		*dc = r->default_ctrl;

	m.low = 0;
	m.high = r->num_closid;
	r->msr_update(d, &m, r);
	return 0;
}
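
/*
 * Example (illustrative): for an L3 CAT domain with cbm_len = 20 and
 * num_closid = 16 this writes 0xfffff into all sixteen IA32_L3_CBM MSRs,
 * and for an MBA domain it writes the delay value for 100% bandwidth, so a
 * freshly onlined domain imposes no restrictions.
 */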

static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
					   sizeof(unsigned long),
					   GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			kfree(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			kfree(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	return 0;
}

/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		kfree(d);
		return;
	}

	if (r->mon_capable && domain_setup_mon_state(r, d)) {
		kfree(d);
		return;
	}

	list_add_tail(&d->list, add_pos);

	/*
	 * If resctrl is mounted, add
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		/*
		 * If resctrl is mounted, remove all the
		 * per domain monitor data directories.
		 */
		if (static_branch_unlikely(&rdt_mon_enable_key))
			rmdir_mondata_subdir_allrdtgrp(r, d->id);
		list_del(&d->list);
		if (is_mbm_enabled())
			cancel_delayed_work(&d->mbm_over);
		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
			/*
			 * When a package is going down, forcefully
			 * decrement rmid->ebusy. There is no way to know
			 * that the L3 was flushed and hence may lead to
			 * incorrect counts in rare scenarios, but leaving
			 * the RMID as busy creates RMID leaks if the
			 * package never comes back.
			 */
			__check_limbo(d, true);
			cancel_delayed_work(&d->cqm_limbo);
		}

		kfree(d->ctrl_val);
		kfree(d->rmid_busy_llc);
		kfree(d->mbm_total);
		kfree(d->mbm_local);
		kfree(d);
		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}

static void clear_closid_rmid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}

static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
			break;
		}
	}
}

static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_alloc_capable_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)

static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);
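
/*
 * Usage sketch (assumed kernel command line, consistent with the parser
 * above): "rdt=!l3cat" force-disables L3 cache allocation even though CPUID
 * reports it, while "rdt=cmt,mbmtotal" can re-enable events that a model
 * quirk (see rdt_quirks() below) turned off.
 */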

static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}

static __init bool get_rdt_alloc_resources(void)
{
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (rdt_cpu_has(X86_FEATURE_MBA)) {
		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
			ret = true;
	}
	return ret;
}

static __init bool get_rdt_mon_resources(void)
{
	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}

static __init void rdt_quirks(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
	}
}

static __init bool get_rdt_resources(void)
{
	rdt_quirks();
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}

static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}

	for_each_alloc_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("Intel RDT %s monitoring detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);