// SPDX-License-Identifier: GPL-2.0
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happens.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple CPUs that share a machine check bank
 * so we need to designate a single CPU to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a CPU is offlined or brought online since
 * some MCA banks are shared across CPUs. When a CPU is offlined, cmci_clear()
 * disables CMCI on all banks owned by the CPU and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different CPU may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined CPU.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * CMCI storm detection backoff counter
 *
 * During a storm, we reset this counter to INITIAL_CHECK_INTERVAL if the
 * last poll logged an error. If not, we decrement it by one. We signal
 * the end of the CMCI storm when it reaches 0.
 */
static DEFINE_PER_CPU(int, cmci_backoff_cnt);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(HZ)
#define CMCI_STORM_THRESHOLD	15

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;

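/*
 * Check whether this CPU supports CMCI and, if so, report the number of
 * MCA banks (clamped to MAX_NR_BANKS) through *banks.
 */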
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the initialization
	 * code is vendor keyed and this makes sure none of the
	 * backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

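/*
 * LMCE (Local Machine Check Exception) delivers a machine check to only
 * the affected logical CPU instead of broadcasting it to all of them.
 * Check that the CPU advertises it and that the BIOS has opted in.
 */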
static bool lmce_supported(void)
{
	u64 tmp;

	if (mca_cfg.lmce_disabled)
		return false;

	rdmsrl(MSR_IA32_MCG_CAP, tmp);

	/*
	 * LMCE depends on recovery support in the processor. Hence both
	 * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
	 */
	if ((tmp & (MCG_SER_P | MCG_LMCE_P)) != (MCG_SER_P | MCG_LMCE_P))
		return false;

	/*
	 * BIOS should indicate support for LMCE by setting bit 20 in
	 * IA32_FEAT_CTL without which touching MCG_EXT_CTL will generate a #GP
	 * fault.  The MSR must also be locked for LMCE_ENABLED to take effect.
	 * WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally
	 * locks the MSR in the event that it wasn't already locked by BIOS.
	 */
	rdmsrl(MSR_IA32_FEAT_CTL, tmp);
	if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED)))
		return false;

	return tmp & FEAT_CTL_LMCE_ENABLED;
}

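/*
 * Called from the regular poll timer. Returns false when no CMCI storm
 * is in progress; otherwise polls the owned banks and maintains the
 * backoff counter which signals the end of the storm.
 */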
bool mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return false;

	/*
	 * Reset the counter if we've logged an error in the last poll
	 * during the storm.
	 */
	if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
		this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
	else
		this_cpu_dec(cmci_backoff_cnt);

	return true;
}

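/*
 * A CPU is going down: drop it from the storm accounting and reset its
 * storm state so that a stale ACTIVE state is not carried across a
 * subsequent online.
 */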
void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

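/*
 * Set or clear MCI_CTL2_CMCI_EN on every bank this CPU owns, i.e. switch
 * those banks between CMCI interrupt delivery and polling.
 */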
static void cmci_toggle_interrupt_mode(bool on)
{
	unsigned long flags, *owned;
	int bank;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	owned = this_cpu_ptr(mce_banks_owned);
	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);

		if (on)
			val |= MCI_CTL2_CMCI_EN;
		else
			val &= ~MCI_CTL2_CMCI_EN;

		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

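/*
 * Pick the next interval for the MCE poll timer depending on this CPU's
 * storm state, and transition out of a storm once it has calmed down.
 */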
unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
	    (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
		mce_notify_irq();
		return CMCI_STORM_INTERVAL;
	}

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:

		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the timer
		 * interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		if (!atomic_sub_return(1, &cmci_storm_on_cpus))
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");

		fallthrough;

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When that
		 * happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_toggle_interrupt_mode(true);
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:

		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}

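/*
 * Count CMCIs on this CPU. If more than CMCI_STORM_THRESHOLD arrive
 * within one CMCI_STORM_INTERVAL, declare a storm: disable the CMCI
 * interrupt on the owned banks and fall back to polling.
 */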
static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_toggle_interrupt_mode(false);
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_STORM_INTERVAL);
	this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);

	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;

	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
	unsigned long flags;
	int i;
	int bios_wrong_thresh = 0;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (test_bit(i, owned))
			continue;

		/* Skip banks in firmware first mode */
		if (test_bit(i, mce_banks_ce_disabled))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			continue;
		}

		if (!mca_cfg.bios_cmci_threshold) {
			val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
			val |= CMCI_THRESHOLD;
		} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
			/*
			 * If the bios_cmci_threshold boot option was specified
			 * but the threshold is zero, we'll try to initialize
			 * it to 1.
			 */
			bios_zero_thresh = 1;
			val |= CMCI_THRESHOLD;
		}

		val |= MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			/*
			 * We were able to set thresholds for some banks that
			 * had a threshold of 0. This means the BIOS has not
			 * set the thresholds properly or does not work with
			 * this boot option. Note down now and report later.
			 */
			if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
			    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
				bios_wrong_thresh = 1;
		} else {
			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;

	local_irq_save(flags);
	machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
	local_irq_restore(flags);
}

/* Caller must hold cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
		return;
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same ones */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

/* After a CPU went down, cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}

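/* Disable CMCI on this CPU for a single bank, under cmci_discover_lock. */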
void cmci_disable_bank(int bank)
{
	int banks;
	unsigned long flags;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with the APIC still disabled, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * we don't miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

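/* Enable local MCE delivery if the BIOS permits it and it isn't on yet. */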
void intel_init_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);

	if (!(val & MCG_EXT_CTL_LMCE_EN))
		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}

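/* Disable local MCE delivery; called via mce_intel_feature_clear(). */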
void intel_clear_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
	val &= ~MCG_EXT_CTL_LMCE_EN;
	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}

static void intel_ppin_init(struct cpuinfo_x86 *c)
{
	unsigned long long val;

	/*
	 * Even if testing the presence of the MSR would be enough, we don't
	 * want to risk the situation where other models reuse this MSR for
	 * other purposes.
	 */
	switch (c->x86_model) {
	case INTEL_FAM6_IVYBRIDGE_X:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:

		if (rdmsrl_safe(MSR_PPIN_CTL, &val))
			return;

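		/* Per the SDM: MSR_PPIN_CTL bit 0 is LockOut, bit 1 is Enable_PPIN */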
		if ((val & 3UL) == 1UL) {
			/* PPIN locked in disabled mode */
			return;
		}

		/* If PPIN is disabled, try to enable */
		if (!(val & 2UL)) {
			wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
			rdmsrl_safe(MSR_PPIN_CTL, &val);
		}

		/* Is the enable bit set? */
		if (val & 2UL)
			set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
	}
}

/*
 * Enable additional error logs from the integrated
 * memory controller on processors that support this.
 */
static void intel_imc_init(struct cpuinfo_x86 *c)
{
	u64 error_control;

	switch (c->x86_model) {
	case INTEL_FAM6_SANDYBRIDGE_X:
	case INTEL_FAM6_IVYBRIDGE_X:
	case INTEL_FAM6_HASWELL_X:
		if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
			return;
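		/* Bit 1 of MSR_ERROR_CONTROL is documented as "MemError Log Enable" for these models */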
		error_control |= 2;
		wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
		break;
	}
}

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_cmci();
	intel_init_lmce();
	intel_ppin_init(c);
	intel_imc_init(c);
}

void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
	intel_clear_lmce();
}

bool intel_filter_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/* MCE errata HSD131, HSM142, HSW131, BDM48 and SKX37 */
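	/* These all log a spurious corrected (VAL=1, UC=0) error in bank 0 with this status signature */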
	if ((c->x86 == 6) &&
	    ((c->x86_model == INTEL_FAM6_HASWELL) ||
	     (c->x86_model == INTEL_FAM6_HASWELL_L) ||
	     (c->x86_model == INTEL_FAM6_BROADWELL) ||
	     (c->x86_model == INTEL_FAM6_HASWELL_G) ||
	     (c->x86_model == INTEL_FAM6_SKYLAKE_X)) &&
	    (m->bank == 0) &&
	    ((m->status & 0xa0000000ffffffff) == 0x80000000000f0005))
		return true;

	return false;
}