/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt.h>

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

struct rdt_resource rdt_resources_all[] = {
	{
		.name		= "L3",
		.domains	= domain_init(RDT_RESOURCE_L3),
		.msr_base	= IA32_L3_CBM_BASE,
		.min_cbm_bits	= 1,
		.cache_level	= 3,
		.cbm_idx_multi	= 1,
		.cbm_idx_offset	= 0
	},
	{
		.name		= "L3DATA",
		.domains	= domain_init(RDT_RESOURCE_L3DATA),
		.msr_base	= IA32_L3_CBM_BASE,
		.min_cbm_bits	= 1,
		.cache_level	= 3,
		.cbm_idx_multi	= 2,
		.cbm_idx_offset	= 0
	},
	{
		.name		= "L3CODE",
		.domains	= domain_init(RDT_RESOURCE_L3CODE),
		.msr_base	= IA32_L3_CBM_BASE,
		.min_cbm_bits	= 1,
		.cache_level	= 3,
		.cbm_idx_multi	= 2,
		.cbm_idx_offset	= 1
	},
	{
		.name		= "L2",
		.domains	= domain_init(RDT_RESOURCE_L2),
		.msr_base	= IA32_L2_CBM_BASE,
		.min_cbm_bits	= 1,
		.cache_level	= 2,
		.cbm_idx_multi	= 1,
		.cbm_idx_offset	= 0
	},
};

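/*
 * Map a CLOSID to an index into a resource's range of CBM MSRs. Plain CAT
 * uses one MSR per CLOSID (cbm_idx_multi = 1). With CDP the data and code
 * masks are interleaved, so L3DATA takes the even slots (multi = 2,
 * offset = 0) and L3CODE the odd slots (multi = 2, offset = 1).
 */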
static int cbm_idx(struct rdt_resource *r, int closid)
{
	return closid * r->cbm_idx_multi + r->cbm_idx_offset;
}

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSIDs is always 4 and max cbm
 * length is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline bool cache_alloc_hsw_probe(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
		struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
		u32 l, h, max_cbm = BIT_MASK(20) - 1;

		if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
			return false;
		rdmsr(IA32_L3_CBM_BASE, l, h);

		/* If all the bits were set in MSR, return success */
		if (l != max_cbm)
			return false;

		r->num_closid = 4;
		r->cbm_len = 20;
		r->max_cbm = max_cbm;
		r->min_cbm_bits = 2;
		r->capable = true;
		r->enabled = true;

		return true;
	}

	return false;
}

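/*
 * Fill in a resource from CPUID leaf 0x10, sub-leaf @idx: EAX holds the
 * capacity bitmask length minus one, EDX the maximum CLOSID. data_width is
 * the number of hex digits needed to print a full bitmask in the schemata
 * file.
 */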
static void rdt_get_config(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_1_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cbm_len = eax.split.cbm_len + 1;
	r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->data_width = (r->cbm_len + 3) / 4;
	r->capable = true;
	r->enabled = true;
}

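/*
 * With CDP enabled each CLOSID needs a separate data and code mask, so only
 * half of the L3 CLOSIDs remain usable; the other parameters are inherited
 * from the plain L3 resource.
 */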
static void rdt_get_cdp_l3_config(int type)
{
	struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l3->num_closid / 2;
	r->cbm_len = r_l3->cbm_len;
	r->max_cbm = r_l3->max_cbm;
	r->data_width = (r->cbm_len + 3) / 4;
	r->capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->enabled = false;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_enabled_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

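/*
 * Detect the allocation resources supported by this CPU. Haswell servers
 * are probed separately because they lack CPUID enumeration for CAT;
 * everything else is discovered through the X86_FEATURE_* flags.
 */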
static inline bool get_rdt_resources(void)
{
	bool ret = false;

	if (cache_alloc_hsw_probe())
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
			rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
			rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
		}
		ret = true;
	}
	if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
		rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		ret = true;
	}

	return ret;
}

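/*
 * Return the id of the @level cache that @cpu belongs to, or -1 if no cache
 * of that level is reported. The cache id identifies the cache instance and
 * hence the resource domain shared by a group of CPUs.
 */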
static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}

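/*
 * Write the CBM MSRs for CLOSIDs [low, high) of the domain containing the
 * current CPU. Meant to run on a CPU inside the domain being updated
 * (typically sent there via IPI by the schemata update path), hence the
 * smp_processor_id() lookup.
 */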
void rdt_cbm_update(void *arg)
{
	struct msr_param *m = (struct msr_param *)arg;
	struct rdt_resource *r = m->res;
	int i, cpu = smp_processor_id();
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			goto found;
	}
	pr_info_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);

	return;

found:
	for (i = m->low; i < m->high; i++) {
		int idx = cbm_idx(r, i);

		wrmsrl(r->msr_base + idx, d->cbm[i]);
	}
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
					  struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(id);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int i, id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;

	d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
	if (!d->cbm) {
		kfree(d);
		return;
	}

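	/*
	 * A new domain starts out fully permissive: give every CLOSID the
	 * all-ones bitmask and push the values to the MSRs.
	 */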
	for (i = 0; i < r->num_closid; i++) {
		int idx = cbm_idx(r, i);

		d->cbm[i] = r->max_cbm;
		wrmsrl(r->msr_base + idx, d->cbm[i]);
	}

	cpumask_set_cpu(cpu, &d->cpu_mask);
	list_add_tail(&d->list, add_pos);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		kfree(d->cbm);
		list_del(&d->list);
		kfree(d);
	}
}

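/*
 * Reset both the cached per-cpu CLOSID and the CLOSID in IA32_PQR_ASSOC to
 * 0, i.e. back to the default resource group. The CLOSID occupies the upper
 * half of the MSR; the RMID in the lower half is preserved.
 */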
static void clear_closid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	per_cpu(cpu_closid, cpu) = 0;
	state->closid = 0;
	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
}

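/*
 * CPU hotplug callbacks, registered from intel_rdt_late_init(). A CPU
 * coming online joins each capable resource's domain and starts out in the
 * default rdtgroup with CLOSID 0.
 */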
static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

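/*
 * On offline, drop the CPU from its resource domains and from whichever
 * rdtgroup currently owns it. A CPU is expected to sit in at most one
 * group's cpu_mask, hence the break after the first match.
 */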
static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
			break;
	}
	clear_closid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}

	for_each_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);