blob: 8cf60e29790a943f89ab6f22ce34c02189015976 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Machine check handler.
Ingo Molnare9eee032009-04-08 12:31:17 +02003 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02005 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
Andi Kleenb79109c2009-02-12 13:43:23 +01007 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 */
Joe Perchesc767a542012-05-21 19:50:07 -070010
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
Tim Hockine02e68d2007-07-21 17:10:36 +020013#include <linux/thread_info.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020014#include <linux/capability.h>
15#include <linux/miscdevice.h>
Andi Kleen8457c842009-02-12 13:49:33 +010016#include <linux/ratelimit.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020017#include <linux/kallsyms.h>
18#include <linux/rcupdate.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020019#include <linux/kobject.h>
Hidetoshi Seto14a02532009-04-30 16:04:51 +090020#include <linux/uaccess.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020021#include <linux/kdebug.h>
22#include <linux/kernel.h>
23#include <linux/percpu.h>
24#include <linux/string.h>
Kay Sievers8a25a2f2011-12-21 14:29:42 -080025#include <linux/device.h>
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +010026#include <linux/syscore_ops.h>
Andi Kleen3c079792009-05-27 21:56:55 +020027#include <linux/delay.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020028#include <linux/ctype.h>
29#include <linux/sched.h>
30#include <linux/sysfs.h>
31#include <linux/types.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090032#include <linux/slab.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020033#include <linux/init.h>
34#include <linux/kmod.h>
35#include <linux/poll.h>
Andi Kleen3c079792009-05-27 21:56:55 +020036#include <linux/nmi.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020037#include <linux/cpu.h>
Hidetoshi Seto14a02532009-04-30 16:04:51 +090038#include <linux/smp.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020039#include <linux/fs.h>
Andi Kleen9b1beaf2009-05-27 21:56:59 +020040#include <linux/mm.h>
Huang Ying5be9ed22009-07-31 09:41:42 +080041#include <linux/debugfs.h>
Hidetoshi Setob77e70b2011-06-08 10:56:02 +090042#include <linux/irq_work.h>
Paul Gortmaker69c60c82011-05-26 12:22:53 -040043#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Ingo Molnare9eee032009-04-08 12:31:17 +020045#include <asm/processor.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020046#include <asm/mce.h>
47#include <asm/msr.h>
Ingo Molnare9eee032009-04-08 12:31:17 +020048
Andi Kleenbd19a5e2009-05-27 21:56:55 +020049#include "mce-internal.h"
Ingo Molnar711c2e42009-04-08 12:31:26 +020050
Hidetoshi Seto93b62c32011-06-08 11:00:45 +090051static DEFINE_MUTEX(mce_chrdev_read_mutex);
Ingo Molnar2aa2b50dd2010-03-14 08:57:03 +010052
Paul E. McKenneyf56e8a02010-03-05 15:03:27 -080053#define rcu_dereference_check_mce(p) \
Paul E. McKenneyec8c27e2010-04-30 06:45:36 -070054 rcu_dereference_index_check((p), \
Paul E. McKenneyf56e8a02010-03-05 15:03:27 -080055 rcu_read_lock_sched_held() || \
Hidetoshi Seto93b62c32011-06-08 11:00:45 +090056 lockdep_is_held(&mce_chrdev_read_mutex))
Paul E. McKenneyf56e8a02010-03-05 15:03:27 -080057
Hidetoshi Seto8968f9d2009-10-13 16:19:41 +090058#define CREATE_TRACE_POINTS
59#include <trace/events/mce.h>
60
Hidetoshi Seto4e5b3e62009-06-15 17:20:20 +090061int mce_disabled __read_mostly;
Andi Kleen04b2b1a2009-04-28 22:50:19 +020062
Ingo Molnare9eee032009-04-08 12:31:17 +020063#define MISC_MCELOG_MINOR 227
Andi Kleen0d7482e32009-02-17 23:07:13 +010064
Andi Kleen3c079792009-05-27 21:56:55 +020065#define SPINUNIT 100 /* 100ns */
66
Andi Kleen553f2652006-04-07 19:49:57 +020067atomic_t mce_entry;
68
Andi Kleen01ca79f2009-05-27 21:56:52 +020069DEFINE_PER_CPU(unsigned, mce_exception_count);
70
Tim Hockinbd784322007-07-21 17:10:37 +020071/*
72 * Tolerant levels:
73 * 0: always panic on uncorrected errors, log corrected errors
74 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
75 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
76 * 3: never panic or SIGBUS, log all errors (for testing only)
77 */
Hidetoshi Seto4e5b3e62009-06-15 17:20:20 +090078static int tolerant __read_mostly = 1;
79static int banks __read_mostly;
Hidetoshi Seto4e5b3e62009-06-15 17:20:20 +090080static int rip_msr __read_mostly;
81static int mce_bootlog __read_mostly = -1;
82static int monarch_timeout __read_mostly = -1;
83static int mce_panic_timeout __read_mostly;
84static int mce_dont_log_ce __read_mostly;
85int mce_cmci_disabled __read_mostly;
86int mce_ignore_ce __read_mostly;
87int mce_ser __read_mostly;
Andi Kleena98f0dd2007-02-13 13:26:23 +010088
Andi Kleencebe1822009-07-09 00:31:43 +020089struct mce_bank *mce_banks __read_mostly;
90
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +090091/* User mode helper program triggered by machine check event */
92static unsigned long mce_need_notify;
93static char mce_helper[128];
94static char *mce_helper_argv[2] = { mce_helper, NULL };
Linus Torvalds1da177e2005-04-16 15:20:36 -070095
Hidetoshi Seto93b62c32011-06-08 11:00:45 +090096static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
97
Andi Kleen3c079792009-05-27 21:56:55 +020098static DEFINE_PER_CPU(struct mce, mces_seen);
99static int cpu_missing;
100
Andi Kleenee031c32009-02-12 13:49:34 +0100101/* MCA banks polled by the period polling timer for corrected events */
102DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
103 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
104};
105
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200106static DEFINE_PER_CPU(struct work_struct, mce_work);
107
Tony Luck61b0fcc2012-07-19 11:28:46 -0700108static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
109
Borislav Petkov3653ada2011-12-04 15:12:09 +0100110/*
111 * CPU/chipset specific EDAC code can register a notifier call here to print
112 * MCE errors in a human-readable form.
113 */
114ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
115
Andi Kleenb5f2fa42009-02-12 13:43:22 +0100116/* Do initial initialization of a struct mce */
117void mce_setup(struct mce *m)
118{
119 memset(m, 0, sizeof(struct mce));
Andi Kleend620c672009-05-27 21:56:56 +0200120 m->cpu = m->extcpu = smp_processor_id();
Andi Kleenb5f2fa42009-02-12 13:43:22 +0100121 rdtscll(m->tsc);
Andi Kleen8ee08342009-05-27 21:56:56 +0200122 /* We hope get_seconds stays lockless */
123 m->time = get_seconds();
124 m->cpuvendor = boot_cpu_data.x86_vendor;
125 m->cpuid = cpuid_eax(1);
Andi Kleen8ee08342009-05-27 21:56:56 +0200126 m->socketid = cpu_data(m->extcpu).phys_proc_id;
Andi Kleen8ee08342009-05-27 21:56:56 +0200127 m->apicid = cpu_data(m->extcpu).initial_apicid;
128 rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
Andi Kleenb5f2fa42009-02-12 13:43:22 +0100129}
130
Andi Kleenea149b32009-04-29 19:31:00 +0200131DEFINE_PER_CPU(struct mce, injectm);
132EXPORT_PER_CPU_SYMBOL_GPL(injectm);
133
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134/*
135 * Lockless MCE logging infrastructure.
136 * This avoids deadlocks on printk locks without having to break locks. Also
137 * separate MCEs from kernel messages to avoid bogus bug reports.
138 */
139
Adrian Bunk231fd902008-01-30 13:30:30 +0100140static struct mce_log mcelog = {
Andi Kleenf6fb0ac2009-05-27 21:56:55 +0200141 .signature = MCE_LOG_SIGNATURE,
142 .len = MCE_LOG_LEN,
143 .recordlen = sizeof(struct mce),
Thomas Gleixnerd88203d2007-10-23 22:37:23 +0200144};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
146void mce_log(struct mce *mce)
147{
148 unsigned next, entry;
Borislav Petkovf0cb5452011-07-18 11:24:45 -0300149 int ret = 0;
Ingo Molnare9eee032009-04-08 12:31:17 +0200150
Hidetoshi Seto8968f9d2009-10-13 16:19:41 +0900151 /* Emit the trace record: */
152 trace_mce_record(mce);
153
Borislav Petkovf0cb5452011-07-18 11:24:45 -0300154 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
155 if (ret == NOTIFY_STOP)
156 return;
157
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158 mce->finished = 0;
Mike Waychison76441432005-09-30 00:01:27 +0200159 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160 for (;;) {
Paul E. McKenneyf56e8a02010-03-05 15:03:27 -0800161 entry = rcu_dereference_check_mce(mcelog.next);
Andi Kleen673242c2005-09-12 18:49:24 +0200162 for (;;) {
Mauro Carvalho Chehab696e4092009-07-23 06:57:45 -0300163
164 /*
Ingo Molnare9eee032009-04-08 12:31:17 +0200165 * When the buffer fills up discard new entries.
166 * Assume that the earlier errors are the more
167 * interesting ones:
168 */
Andi Kleen673242c2005-09-12 18:49:24 +0200169 if (entry >= MCE_LOG_LEN) {
Hidetoshi Seto14a02532009-04-30 16:04:51 +0900170 set_bit(MCE_OVERFLOW,
171 (unsigned long *)&mcelog.flags);
Andi Kleen673242c2005-09-12 18:49:24 +0200172 return;
173 }
Ingo Molnare9eee032009-04-08 12:31:17 +0200174 /* Old left over entry. Skip: */
Andi Kleen673242c2005-09-12 18:49:24 +0200175 if (mcelog.entry[entry].finished) {
176 entry++;
177 continue;
178 }
Mike Waychison76441432005-09-30 00:01:27 +0200179 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181 smp_rmb();
182 next = entry + 1;
183 if (cmpxchg(&mcelog.next, entry, next) == entry)
184 break;
185 }
186 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
Mike Waychison76441432005-09-30 00:01:27 +0200187 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188 mcelog.entry[entry].finished = 1;
Mike Waychison76441432005-09-30 00:01:27 +0200189 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190
Andi Kleena0189c72009-05-27 21:56:54 +0200191 mce->finished = 1;
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +0900192 set_bit(0, &mce_need_notify);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193}
194
Borislav Petkov09371952011-12-08 12:28:33 +0100195static void drain_mcelog_buffer(void)
196{
197 unsigned int next, i, prev = 0;
198
Srivatsa S. Bhatb11e3d72012-03-07 11:44:29 +0100199 next = ACCESS_ONCE(mcelog.next);
Borislav Petkov09371952011-12-08 12:28:33 +0100200
201 do {
202 struct mce *m;
203
204 /* drain what was logged during boot */
205 for (i = prev; i < next; i++) {
206 unsigned long start = jiffies;
207 unsigned retries = 1;
208
209 m = &mcelog.entry[i];
210
211 while (!m->finished) {
212 if (time_after_eq(jiffies, start + 2*retries))
213 retries++;
214
215 cpu_relax();
216
217 if (!m->finished && retries >= 4) {
Joe Perchesc767a542012-05-21 19:50:07 -0700218 pr_err("skipping error being logged currently!\n");
Borislav Petkov09371952011-12-08 12:28:33 +0100219 break;
220 }
221 }
222 smp_rmb();
223 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
224 }
225
226 memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
227 prev = next;
228 next = cmpxchg(&mcelog.next, prev, 0);
229 } while (next != prev);
230}
231
232
Borislav Petkov3653ada2011-12-04 15:12:09 +0100233void mce_register_decode_chain(struct notifier_block *nb)
234{
235 atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
Borislav Petkov09371952011-12-08 12:28:33 +0100236 drain_mcelog_buffer();
Borislav Petkov3653ada2011-12-04 15:12:09 +0100237}
238EXPORT_SYMBOL_GPL(mce_register_decode_chain);
239
240void mce_unregister_decode_chain(struct notifier_block *nb)
241{
242 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
243}
244EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
245
Hidetoshi Seto77e26cc2009-06-11 16:04:35 +0900246static void print_mce(struct mce *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247{
Borislav Petkovdffa4b22011-04-20 12:23:49 +0200248 int ret = 0;
249
Huang Yinga2d7b0d2010-06-08 14:35:39 +0800250 pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
Andi Kleend620c672009-05-27 21:56:56 +0200251 m->extcpu, m->mcgstatus, m->bank, m->status);
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200252
H. Peter Anvin65ea5b02008-01-30 13:30:56 +0100253 if (m->ip) {
Huang Yinga2d7b0d2010-06-08 14:35:39 +0800254 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200255 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
256 m->cs, m->ip);
257
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 if (m->cs == __KERNEL_CS)
H. Peter Anvin65ea5b02008-01-30 13:30:56 +0100259 print_symbol("{%s}", m->ip);
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200260 pr_cont("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261 }
Borislav Petkov549d0422009-07-24 13:51:42 +0200262
Huang Yinga2d7b0d2010-06-08 14:35:39 +0800263 pr_emerg(HW_ERR "TSC %llx ", m->tsc);
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200264 if (m->addr)
265 pr_cont("ADDR %llx ", m->addr);
266 if (m->misc)
267 pr_cont("MISC %llx ", m->misc);
268
269 pr_cont("\n");
Andi Kleen506ed6b2011-10-12 17:46:33 -0700270 /*
271 * Note this output is parsed by external tools and old fields
272 * should not be changed.
273 */
Borislav Petkov881e23e2011-10-17 16:45:10 +0200274 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
Andi Kleen506ed6b2011-10-12 17:46:33 -0700275 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
276 cpu_data(m->extcpu).microcode);
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200277
278 /*
279 * Print out human-readable details about the MCE error,
Borislav Petkovfb253192009-10-07 13:20:38 +0200280 * (if the CPU has an implementation for that)
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200281 */
Borislav Petkovdffa4b22011-04-20 12:23:49 +0200282 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
283 if (ret == NOTIFY_STOP)
284 return;
285
286 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
Andi Kleen86503562009-05-27 21:56:58 +0200287}
288
Andi Kleenf94b61c2009-05-27 21:56:55 +0200289#define PANIC_TIMEOUT 5 /* 5 seconds */
290
291static atomic_t mce_paniced;
292
Huang Yingbf783f92009-07-31 09:41:43 +0800293static int fake_panic;
294static atomic_t mce_fake_paniced;
295
Andi Kleenf94b61c2009-05-27 21:56:55 +0200296/* Panic in progress. Enable interrupts and wait for final IPI */
297static void wait_for_panic(void)
298{
299 long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200300
Andi Kleenf94b61c2009-05-27 21:56:55 +0200301 preempt_disable();
302 local_irq_enable();
303 while (timeout-- > 0)
304 udelay(1);
Andi Kleen29b0f592009-05-27 21:56:56 +0200305 if (panic_timeout == 0)
306 panic_timeout = mce_panic_timeout;
Andi Kleenf94b61c2009-05-27 21:56:55 +0200307 panic("Panicing machine check CPU died");
308}
309
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200310static void mce_panic(char *msg, struct mce *final, char *exp)
Thomas Gleixnerd88203d2007-10-23 22:37:23 +0200311{
Huang Ying482908b2010-05-18 14:35:22 +0800312 int i, apei_err = 0;
Tim Hockine02e68d2007-07-21 17:10:36 +0200313
Huang Yingbf783f92009-07-31 09:41:43 +0800314 if (!fake_panic) {
315 /*
316 * Make sure only one CPU runs in machine check panic
317 */
318 if (atomic_inc_return(&mce_paniced) > 1)
319 wait_for_panic();
320 barrier();
Andi Kleenf94b61c2009-05-27 21:56:55 +0200321
Huang Yingbf783f92009-07-31 09:41:43 +0800322 bust_spinlocks(1);
323 console_verbose();
324 } else {
325 /* Don't log too much for fake panic */
326 if (atomic_inc_return(&mce_fake_paniced) > 1)
327 return;
328 }
Andi Kleena0189c72009-05-27 21:56:54 +0200329 /* First print corrected ones that are still unlogged */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 for (i = 0; i < MCE_LOG_LEN; i++) {
Andi Kleena0189c72009-05-27 21:56:54 +0200331 struct mce *m = &mcelog.entry[i];
Hidetoshi Seto77e26cc2009-06-11 16:04:35 +0900332 if (!(m->status & MCI_STATUS_VAL))
333 continue;
Huang Ying482908b2010-05-18 14:35:22 +0800334 if (!(m->status & MCI_STATUS_UC)) {
Hidetoshi Seto77e26cc2009-06-11 16:04:35 +0900335 print_mce(m);
Huang Ying482908b2010-05-18 14:35:22 +0800336 if (!apei_err)
337 apei_err = apei_write_mce(m);
338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339 }
Andi Kleena0189c72009-05-27 21:56:54 +0200340 /* Now print uncorrected but with the final one last */
341 for (i = 0; i < MCE_LOG_LEN; i++) {
342 struct mce *m = &mcelog.entry[i];
343 if (!(m->status & MCI_STATUS_VAL))
344 continue;
Hidetoshi Seto77e26cc2009-06-11 16:04:35 +0900345 if (!(m->status & MCI_STATUS_UC))
346 continue;
Huang Ying482908b2010-05-18 14:35:22 +0800347 if (!final || memcmp(m, final, sizeof(struct mce))) {
Hidetoshi Seto77e26cc2009-06-11 16:04:35 +0900348 print_mce(m);
Huang Ying482908b2010-05-18 14:35:22 +0800349 if (!apei_err)
350 apei_err = apei_write_mce(m);
351 }
Andi Kleena0189c72009-05-27 21:56:54 +0200352 }
Huang Ying482908b2010-05-18 14:35:22 +0800353 if (final) {
Hidetoshi Seto77e26cc2009-06-11 16:04:35 +0900354 print_mce(final);
Huang Ying482908b2010-05-18 14:35:22 +0800355 if (!apei_err)
356 apei_err = apei_write_mce(final);
357 }
Andi Kleen3c079792009-05-27 21:56:55 +0200358 if (cpu_missing)
Huang Yinga2d7b0d2010-06-08 14:35:39 +0800359 pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200360 if (exp)
Huang Yinga2d7b0d2010-06-08 14:35:39 +0800361 pr_emerg(HW_ERR "Machine check: %s\n", exp);
Huang Yingbf783f92009-07-31 09:41:43 +0800362 if (!fake_panic) {
363 if (panic_timeout == 0)
364 panic_timeout = mce_panic_timeout;
365 panic(msg);
366 } else
Huang Yinga2d7b0d2010-06-08 14:35:39 +0800367 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
Thomas Gleixnerd88203d2007-10-23 22:37:23 +0200368}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369
Andi Kleenea149b32009-04-29 19:31:00 +0200370/* Support code for software error injection */
371
372static int msr_to_offset(u32 msr)
373{
Tejun Heo0a3aee02010-12-18 16:28:55 +0100374 unsigned bank = __this_cpu_read(injectm.bank);
Ingo Molnarf436f8b2009-10-01 16:14:32 +0200375
Andi Kleenea149b32009-04-29 19:31:00 +0200376 if (msr == rip_msr)
377 return offsetof(struct mce, ip);
Andi Kleena2d32bc2009-07-09 00:31:44 +0200378 if (msr == MSR_IA32_MCx_STATUS(bank))
Andi Kleenea149b32009-04-29 19:31:00 +0200379 return offsetof(struct mce, status);
Andi Kleena2d32bc2009-07-09 00:31:44 +0200380 if (msr == MSR_IA32_MCx_ADDR(bank))
Andi Kleenea149b32009-04-29 19:31:00 +0200381 return offsetof(struct mce, addr);
Andi Kleena2d32bc2009-07-09 00:31:44 +0200382 if (msr == MSR_IA32_MCx_MISC(bank))
Andi Kleenea149b32009-04-29 19:31:00 +0200383 return offsetof(struct mce, misc);
384 if (msr == MSR_IA32_MCG_STATUS)
385 return offsetof(struct mce, mcgstatus);
386 return -1;
387}
388
Andi Kleen5f8c1a52009-04-29 19:29:12 +0200389/* MSR access wrappers used for error injection */
390static u64 mce_rdmsrl(u32 msr)
391{
392 u64 v;
Ingo Molnar11868a22009-09-23 17:49:55 +0200393
Tejun Heo0a3aee02010-12-18 16:28:55 +0100394 if (__this_cpu_read(injectm.finished)) {
Andi Kleenea149b32009-04-29 19:31:00 +0200395 int offset = msr_to_offset(msr);
Ingo Molnar11868a22009-09-23 17:49:55 +0200396
Andi Kleenea149b32009-04-29 19:31:00 +0200397 if (offset < 0)
398 return 0;
399 return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
400 }
Ingo Molnar11868a22009-09-23 17:49:55 +0200401
402 if (rdmsrl_safe(msr, &v)) {
403 WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
404 /*
405 * Return zero in case the access faulted. This should
406 * not happen normally but can happen if the CPU does
407 * something weird, or if the code is buggy.
408 */
409 v = 0;
410 }
411
Andi Kleen5f8c1a52009-04-29 19:29:12 +0200412 return v;
413}
414
415static void mce_wrmsrl(u32 msr, u64 v)
416{
Tejun Heo0a3aee02010-12-18 16:28:55 +0100417 if (__this_cpu_read(injectm.finished)) {
Andi Kleenea149b32009-04-29 19:31:00 +0200418 int offset = msr_to_offset(msr);
Ingo Molnar11868a22009-09-23 17:49:55 +0200419
Andi Kleenea149b32009-04-29 19:31:00 +0200420 if (offset >= 0)
421 *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
422 return;
423 }
Andi Kleen5f8c1a52009-04-29 19:29:12 +0200424 wrmsrl(msr, v);
425}
426
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200427/*
Hidetoshi Setob8325c52011-06-08 10:57:46 +0900428 * Collect all global (w.r.t. this processor) status about this machine
429 * check into our "mce" struct so that we can use it later to assess
430 * the severity of the problem as we read per-bank specific details.
431 */
432static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
433{
434 mce_setup(m);
435
436 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
437 if (regs) {
438 /*
439 * Get the address of the instruction at the time of
440 * the machine check error.
441 */
442 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
443 m->ip = regs->ip;
444 m->cs = regs->cs;
Andi Kleena129a7c2010-11-19 13:16:22 +0100445
446 /*
447 * When in VM86 mode make the cs look like ring 3
448 * always. This is a lie, but it's better than passing
449 * the additional vm86 bit around everywhere.
450 */
451 if (v8086_mode(regs))
452 m->cs |= 3;
Hidetoshi Setob8325c52011-06-08 10:57:46 +0900453 }
454 /* Use accurate RIP reporting if available. */
455 if (rip_msr)
456 m->ip = mce_rdmsrl(rip_msr);
457 }
458}
459
460/*
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200461 * Simple lockless ring to communicate PFNs from the exception handler with the
462 * process context work function. This is vastly simplified because there's
463 * only a single reader and a single writer.
464 */
465#define MCE_RING_SIZE 16 /* we use one entry less */
466
467struct mce_ring {
468 unsigned short start;
469 unsigned short end;
470 unsigned long ring[MCE_RING_SIZE];
471};
472static DEFINE_PER_CPU(struct mce_ring, mce_ring);
473
474/* Runs with CPU affinity in workqueue */
475static int mce_ring_empty(void)
476{
477 struct mce_ring *r = &__get_cpu_var(mce_ring);
478
479 return r->start == r->end;
480}
481
482static int mce_ring_get(unsigned long *pfn)
483{
484 struct mce_ring *r;
485 int ret = 0;
486
487 *pfn = 0;
488 get_cpu();
489 r = &__get_cpu_var(mce_ring);
490 if (r->start == r->end)
491 goto out;
492 *pfn = r->ring[r->start];
493 r->start = (r->start + 1) % MCE_RING_SIZE;
494 ret = 1;
495out:
496 put_cpu();
497 return ret;
498}
499
500/* Always runs in MCE context with preempt off */
501static int mce_ring_add(unsigned long pfn)
502{
503 struct mce_ring *r = &__get_cpu_var(mce_ring);
504 unsigned next;
505
506 next = (r->end + 1) % MCE_RING_SIZE;
507 if (next == r->start)
508 return -1;
509 r->ring[r->end] = pfn;
510 wmb();
511 r->end = next;
512 return 0;
513}
514
Andi Kleen88ccbed2009-02-12 13:49:36 +0100515int mce_available(struct cpuinfo_x86 *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516{
Andi Kleen04b2b1a2009-04-28 22:50:19 +0200517 if (mce_disabled)
Andi Kleen5b4408f2009-02-12 13:39:30 +0100518 return 0;
Akinobu Mita3d1712c2006-03-24 03:15:11 -0800519 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520}
521
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200522static void mce_schedule_work(void)
523{
524 if (!mce_ring_empty()) {
525 struct work_struct *work = &__get_cpu_var(mce_work);
526 if (!work_pending(work))
527 schedule_work(work);
528 }
529}
530
Hidetoshi Setob77e70b2011-06-08 10:56:02 +0900531DEFINE_PER_CPU(struct irq_work, mce_irq_work);
532
533static void mce_irq_work_cb(struct irq_work *entry)
Andi Kleenccc3c312009-05-27 21:56:54 +0200534{
Andi Kleen9ff36ee2009-05-27 21:56:58 +0200535 mce_notify_irq();
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200536 mce_schedule_work();
Andi Kleenccc3c312009-05-27 21:56:54 +0200537}
Andi Kleenccc3c312009-05-27 21:56:54 +0200538
539static void mce_report_event(struct pt_regs *regs)
540{
541 if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
Andi Kleen9ff36ee2009-05-27 21:56:58 +0200542 mce_notify_irq();
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200543 /*
544 * Triggering the work queue here is just an insurance
545 * policy in case the syscall exit notify handler
546 * doesn't run soon enough or ends up running on the
547 * wrong CPU (can happen when audit sleeps)
548 */
549 mce_schedule_work();
Andi Kleenccc3c312009-05-27 21:56:54 +0200550 return;
551 }
552
Hidetoshi Setob77e70b2011-06-08 10:56:02 +0900553 irq_work_queue(&__get_cpu_var(mce_irq_work));
Andi Kleenccc3c312009-05-27 21:56:54 +0200554}
555
Tony Luck85f926942011-12-13 09:48:13 -0800556/*
557 * Read ADDR and MISC registers.
558 */
559static void mce_read_aux(struct mce *m, int i)
560{
561 if (m->status & MCI_STATUS_MISCV)
562 m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
563 if (m->status & MCI_STATUS_ADDRV) {
564 m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
565
566 /*
567 * Mask the reported address by the reported granularity.
568 */
569 if (mce_ser && (m->status & MCI_STATUS_MISCV)) {
570 u8 shift = MCI_MISC_ADDR_LSB(m->misc);
571 m->addr >>= shift;
572 m->addr <<= shift;
573 }
574 }
575}
576
Andi Kleenca84f692009-05-27 21:56:57 +0200577DEFINE_PER_CPU(unsigned, mce_poll_count);
578
Thomas Gleixnerd88203d2007-10-23 22:37:23 +0200579/*
Andi Kleenb79109c2009-02-12 13:43:23 +0100580 * Poll for corrected events or events that happened before reset.
581 * Those are just logged through /dev/mcelog.
582 *
583 * This is executed in standard interrupt context.
Andi Kleened7290d2009-05-27 21:56:57 +0200584 *
585 * Note: spec recommends to panic for fatal unsignalled
586 * errors here. However this would be quite problematic --
587 * we would need to reimplement the Monarch handling and
588 * it would mess up the exclusion between exception handler
589 * and poll hander -- * so we skip this for now.
590 * These cases should not happen anyways, or only when the CPU
591 * is already totally * confused. In this case it's likely it will
592 * not fully execute the machine check handler either.
Andi Kleenb79109c2009-02-12 13:43:23 +0100593 */
Andi Kleenee031c32009-02-12 13:49:34 +0100594void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
Andi Kleenb79109c2009-02-12 13:43:23 +0100595{
596 struct mce m;
597 int i;
598
Alex Shic6ae41e2012-05-11 15:35:27 +0800599 this_cpu_inc(mce_poll_count);
Andi Kleenca84f692009-05-27 21:56:57 +0200600
Hidetoshi Setob8325c52011-06-08 10:57:46 +0900601 mce_gather_info(&m, NULL);
Andi Kleenb79109c2009-02-12 13:43:23 +0100602
Andi Kleenb79109c2009-02-12 13:43:23 +0100603 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +0200604 if (!mce_banks[i].ctl || !test_bit(i, *b))
Andi Kleenb79109c2009-02-12 13:43:23 +0100605 continue;
606
607 m.misc = 0;
608 m.addr = 0;
609 m.bank = i;
610 m.tsc = 0;
611
612 barrier();
Andi Kleena2d32bc2009-07-09 00:31:44 +0200613 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
Andi Kleenb79109c2009-02-12 13:43:23 +0100614 if (!(m.status & MCI_STATUS_VAL))
615 continue;
616
617 /*
Andi Kleened7290d2009-05-27 21:56:57 +0200618 * Uncorrected or signalled events are handled by the exception
619 * handler when it is enabled, so don't process those here.
Andi Kleenb79109c2009-02-12 13:43:23 +0100620 *
621 * TBD do the same check for MCI_STATUS_EN here?
622 */
Andi Kleened7290d2009-05-27 21:56:57 +0200623 if (!(flags & MCP_UC) &&
624 (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
Andi Kleenb79109c2009-02-12 13:43:23 +0100625 continue;
626
Tony Luck85f926942011-12-13 09:48:13 -0800627 mce_read_aux(&m, i);
Andi Kleenb79109c2009-02-12 13:43:23 +0100628
629 if (!(flags & MCP_TIMESTAMP))
630 m.tsc = 0;
631 /*
632 * Don't get the IP here because it's unlikely to
633 * have anything to do with the actual error location.
634 */
Borislav Petkovf0cb5452011-07-18 11:24:45 -0300635 if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
Andi Kleen5679af42009-04-07 17:06:55 +0200636 mce_log(&m);
Andi Kleenb79109c2009-02-12 13:43:23 +0100637
638 /*
639 * Clear state for this bank.
640 */
Andi Kleena2d32bc2009-07-09 00:31:44 +0200641 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
Andi Kleenb79109c2009-02-12 13:43:23 +0100642 }
643
644 /*
645 * Don't clear MCG_STATUS here because it's only defined for
646 * exceptions.
647 */
Andi Kleen88921be2009-05-27 21:56:51 +0200648
649 sync_core();
Andi Kleenb79109c2009-02-12 13:43:23 +0100650}
Andi Kleenea149b32009-04-29 19:31:00 +0200651EXPORT_SYMBOL_GPL(machine_check_poll);
Andi Kleenb79109c2009-02-12 13:43:23 +0100652
653/*
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200654 * Do a quick check if any of the events requires a panic.
655 * This decides if we keep the events around or clear them.
656 */
Tony Luck61b0fcc2012-07-19 11:28:46 -0700657static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
658 struct pt_regs *regs)
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200659{
Tony Luck95022b82012-04-18 15:19:40 -0700660 int i, ret = 0;
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200661
662 for (i = 0; i < banks; i++) {
Andi Kleena2d32bc2009-07-09 00:31:44 +0200663 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
Tony Luck61b0fcc2012-07-19 11:28:46 -0700664 if (m->status & MCI_STATUS_VAL) {
Tony Luck95022b82012-04-18 15:19:40 -0700665 __set_bit(i, validp);
Tony Luck61b0fcc2012-07-19 11:28:46 -0700666 if (quirk_no_way_out)
667 quirk_no_way_out(i, m, regs);
668 }
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200669 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
Tony Luck95022b82012-04-18 15:19:40 -0700670 ret = 1;
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200671 }
Tony Luck95022b82012-04-18 15:19:40 -0700672 return ret;
Andi Kleenbd19a5e2009-05-27 21:56:55 +0200673}
674
675/*
Andi Kleen3c079792009-05-27 21:56:55 +0200676 * Variable to establish order between CPUs while scanning.
677 * Each CPU spins initially until executing is equal its number.
678 */
679static atomic_t mce_executing;
680
681/*
682 * Defines order of CPUs on entry. First CPU becomes Monarch.
683 */
684static atomic_t mce_callin;
685
686/*
687 * Check if a timeout waiting for other CPUs happened.
688 */
689static int mce_timed_out(u64 *t)
690{
691 /*
692 * The others already did panic for some reason.
693 * Bail out like in a timeout.
694 * rmb() to tell the compiler that system_state
695 * might have been modified by someone else.
696 */
697 rmb();
698 if (atomic_read(&mce_paniced))
699 wait_for_panic();
700 if (!monarch_timeout)
701 goto out;
702 if ((s64)*t < SPINUNIT) {
703 /* CHECKME: Make panic default for 1 too? */
704 if (tolerant < 1)
705 mce_panic("Timeout synchronizing machine check over CPUs",
706 NULL, NULL);
707 cpu_missing = 1;
708 return 1;
709 }
710 *t -= SPINUNIT;
711out:
712 touch_nmi_watchdog();
713 return 0;
714}
715
716/*
717 * The Monarch's reign. The Monarch is the CPU who entered
718 * the machine check handler first. It waits for the others to
719 * raise the exception too and then grades them. When any
720 * error is fatal panic. Only then let the others continue.
721 *
722 * The other CPUs entering the MCE handler will be controlled by the
723 * Monarch. They are called Subjects.
724 *
725 * This way we prevent any potential data corruption in a unrecoverable case
726 * and also makes sure always all CPU's errors are examined.
727 *
Hidetoshi Seto680b6cf2009-08-26 16:20:36 +0900728 * Also this detects the case of a machine check event coming from outer
Andi Kleen3c079792009-05-27 21:56:55 +0200729 * space (not detected by any CPUs) In this case some external agent wants
730 * us to shut down, so panic too.
731 *
732 * The other CPUs might still decide to panic if the handler happens
733 * in a unrecoverable place, but in this case the system is in a semi-stable
734 * state and won't corrupt anything by itself. It's ok to let the others
735 * continue for a bit first.
736 *
737 * All the spin loops have timeouts; when a timeout happens a CPU
738 * typically elects itself to be Monarch.
739 */
740static void mce_reign(void)
741{
742 int cpu;
743 struct mce *m = NULL;
744 int global_worst = 0;
745 char *msg = NULL;
746 char *nmsg = NULL;
747
748 /*
749 * This CPU is the Monarch and the other CPUs have run
750 * through their handlers.
751 * Grade the severity of the errors of all the CPUs.
752 */
753 for_each_possible_cpu(cpu) {
754 int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
755 &nmsg);
756 if (severity > global_worst) {
757 msg = nmsg;
758 global_worst = severity;
759 m = &per_cpu(mces_seen, cpu);
760 }
761 }
762
763 /*
764 * Cannot recover? Panic here then.
765 * This dumps all the mces in the log buffer and stops the
766 * other CPUs.
767 */
768 if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
Andi Kleenac960372009-05-27 21:56:58 +0200769 mce_panic("Fatal Machine check", m, msg);
Andi Kleen3c079792009-05-27 21:56:55 +0200770
771 /*
772 * For UC somewhere we let the CPU who detects it handle it.
773 * Also must let continue the others, otherwise the handling
774 * CPU could deadlock on a lock.
775 */
776
777 /*
778 * No machine check event found. Must be some external
779 * source or one CPU is hung. Panic.
780 */
Hidetoshi Seto680b6cf2009-08-26 16:20:36 +0900781 if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
Andi Kleen3c079792009-05-27 21:56:55 +0200782 mce_panic("Machine check from unknown source", NULL, NULL);
783
784 /*
785 * Now clear all the mces_seen so that they don't reappear on
786 * the next mce.
787 */
788 for_each_possible_cpu(cpu)
789 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
790}
791
792static atomic_t global_nwo;
793
794/*
795 * Start of Monarch synchronization. This waits until all CPUs have
796 * entered the exception handler and then determines if any of them
797 * saw a fatal event that requires panic. Then it executes them
798 * in the entry order.
799 * TBD double check parallel CPU hotunplug
800 */
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900801static int mce_start(int *no_way_out)
Andi Kleen3c079792009-05-27 21:56:55 +0200802{
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900803 int order;
Andi Kleen3c079792009-05-27 21:56:55 +0200804 int cpus = num_online_cpus();
805 u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
806
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900807 if (!timeout)
808 return -1;
Andi Kleen3c079792009-05-27 21:56:55 +0200809
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900810 atomic_add(*no_way_out, &global_nwo);
Huang Ying184e1fd2009-06-15 15:37:07 +0800811 /*
812 * global_nwo should be updated before mce_callin
813 */
814 smp_wmb();
Borislav Petkova95436e2009-06-20 23:28:22 -0700815 order = atomic_inc_return(&mce_callin);
Andi Kleen3c079792009-05-27 21:56:55 +0200816
817 /*
818 * Wait for everyone.
819 */
820 while (atomic_read(&mce_callin) != cpus) {
821 if (mce_timed_out(&timeout)) {
822 atomic_set(&global_nwo, 0);
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900823 return -1;
Andi Kleen3c079792009-05-27 21:56:55 +0200824 }
825 ndelay(SPINUNIT);
826 }
827
828 /*
Huang Ying184e1fd2009-06-15 15:37:07 +0800829 * mce_callin should be read before global_nwo
830 */
831 smp_rmb();
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900832
833 if (order == 1) {
834 /*
835 * Monarch: Starts executing now, the others wait.
836 */
837 atomic_set(&mce_executing, 1);
838 } else {
839 /*
840 * Subject: Now start the scanning loop one by one in
841 * the original callin order.
842 * This way when there are any shared banks it will be
843 * only seen by one CPU before cleared, avoiding duplicates.
844 */
845 while (atomic_read(&mce_executing) < order) {
846 if (mce_timed_out(&timeout)) {
847 atomic_set(&global_nwo, 0);
848 return -1;
849 }
850 ndelay(SPINUNIT);
851 }
852 }
853
Huang Ying184e1fd2009-06-15 15:37:07 +0800854 /*
Andi Kleen3c079792009-05-27 21:56:55 +0200855 * Cache the global no_way_out state.
856 */
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900857 *no_way_out = atomic_read(&global_nwo);
Andi Kleen3c079792009-05-27 21:56:55 +0200858
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +0900859 return order;
Andi Kleen3c079792009-05-27 21:56:55 +0200860}
861
862/*
863 * Synchronize between CPUs after main scanning loop.
864 * This invokes the bulk of the Monarch processing.
865 */
866static int mce_end(int order)
867{
868 int ret = -1;
869 u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
870
871 if (!timeout)
872 goto reset;
873 if (order < 0)
874 goto reset;
875
876 /*
877 * Allow others to run.
878 */
879 atomic_inc(&mce_executing);
880
881 if (order == 1) {
882 /* CHECKME: Can this race with a parallel hotplug? */
883 int cpus = num_online_cpus();
884
885 /*
886 * Monarch: Wait for everyone to go through their scanning
887 * loops.
888 */
889 while (atomic_read(&mce_executing) <= cpus) {
890 if (mce_timed_out(&timeout))
891 goto reset;
892 ndelay(SPINUNIT);
893 }
894
895 mce_reign();
896 barrier();
897 ret = 0;
898 } else {
899 /*
900 * Subject: Wait for Monarch to finish.
901 */
902 while (atomic_read(&mce_executing) != 0) {
903 if (mce_timed_out(&timeout))
904 goto reset;
905 ndelay(SPINUNIT);
906 }
907
908 /*
909 * Don't reset anything. That's done by the Monarch.
910 */
911 return 0;
912 }
913
914 /*
915 * Reset all global state.
916 */
917reset:
918 atomic_set(&global_nwo, 0);
919 atomic_set(&mce_callin, 0);
920 barrier();
921
922 /*
923 * Let others run again.
924 */
925 atomic_set(&mce_executing, 0);
926 return ret;
927}
928
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200929/*
930 * Check if the address reported by the CPU is in a format we can parse.
931 * It would be possible to add code for most other cases, but all would
932 * be somewhat complicated (e.g. segment offset would require an instruction
Lucas De Marchi0d2eb442011-03-17 16:24:16 -0300933 * parser). So only support physical addresses up to page granuality for now.
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200934 */
935static int mce_usable_address(struct mce *m)
936{
937 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
938 return 0;
Hidetoshi Seto2b90e772011-06-08 10:56:56 +0900939 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200940 return 0;
Hidetoshi Seto2b90e772011-06-08 10:56:56 +0900941 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
Andi Kleen9b1beaf2009-05-27 21:56:59 +0200942 return 0;
943 return 1;
944}
945
Andi Kleen3c079792009-05-27 21:56:55 +0200946static void mce_clear_state(unsigned long *toclear)
947{
948 int i;
949
950 for (i = 0; i < banks; i++) {
951 if (test_bit(i, toclear))
Andi Kleena2d32bc2009-07-09 00:31:44 +0200952 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
Andi Kleen3c079792009-05-27 21:56:55 +0200953 }
954}
955
956/*
Tony Luckaf104e32011-12-14 15:55:20 -0800957 * Need to save faulting physical address associated with a process
958 * in the machine check handler some place where we can grab it back
959 * later in mce_notify_process()
960 */
961#define MCE_INFO_MAX 16
962
963struct mce_info {
964 atomic_t inuse;
965 struct task_struct *t;
966 __u64 paddr;
Tony Luckdad17432012-05-14 15:07:48 -0700967 int restartable;
Tony Luckaf104e32011-12-14 15:55:20 -0800968} mce_info[MCE_INFO_MAX];
969
Tony Luckdad17432012-05-14 15:07:48 -0700970static void mce_save_info(__u64 addr, int c)
Tony Luckaf104e32011-12-14 15:55:20 -0800971{
972 struct mce_info *mi;
973
974 for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
975 if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
976 mi->t = current;
977 mi->paddr = addr;
Tony Luckdad17432012-05-14 15:07:48 -0700978 mi->restartable = c;
Tony Luckaf104e32011-12-14 15:55:20 -0800979 return;
980 }
981 }
982
983 mce_panic("Too many concurrent recoverable errors", NULL, NULL);
984}
985
986static struct mce_info *mce_find_info(void)
987{
988 struct mce_info *mi;
989
990 for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
991 if (atomic_read(&mi->inuse) && mi->t == current)
992 return mi;
993 return NULL;
994}
995
996static void mce_clear_info(struct mce_info *mi)
997{
998 atomic_set(&mi->inuse, 0);
999}
1000
1001/*
Andi Kleenb79109c2009-02-12 13:43:23 +01001002 * The actual machine check handler. This only handles real
1003 * exceptions when something got corrupted coming in through int 18.
1004 *
1005 * This is executed in NMI context not subject to normal locking rules. This
1006 * implies that most kernel services cannot be safely used. Don't even
1007 * think about putting a printk in there!
Andi Kleen3c079792009-05-27 21:56:55 +02001008 *
1009 * On Intel systems this is entered on all CPUs in parallel through
1010 * MCE broadcast. However some CPUs might be broken beyond repair,
1011 * so be always careful when synchronizing with others.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 */
Ingo Molnare9eee032009-04-08 12:31:17 +02001013void do_machine_check(struct pt_regs *regs, long error_code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014{
Andi Kleen3c079792009-05-27 21:56:55 +02001015 struct mce m, *final;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016 int i;
Andi Kleen3c079792009-05-27 21:56:55 +02001017 int worst = 0;
1018 int severity;
1019 /*
1020 * Establish sequential order between the CPUs entering the machine
1021 * check handler.
1022 */
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +09001023 int order;
Tim Hockinbd784322007-07-21 17:10:37 +02001024 /*
1025 * If no_way_out gets set, there is no safe way to recover from this
1026 * MCE. If tolerant is cranked up, we'll try anyway.
1027 */
1028 int no_way_out = 0;
1029 /*
1030 * If kill_it gets set, there might be a way to recover from this
1031 * error.
1032 */
1033 int kill_it = 0;
Andi Kleenb79109c2009-02-12 13:43:23 +01001034 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
Tony Luck95022b82012-04-18 15:19:40 -07001035 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
Andi Kleenbd19a5e2009-05-27 21:56:55 +02001036 char *msg = "Unknown";
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037
Andi Kleen553f2652006-04-07 19:49:57 +02001038 atomic_inc(&mce_entry);
1039
Alex Shic6ae41e2012-05-11 15:35:27 +08001040 this_cpu_inc(mce_exception_count);
Andi Kleen01ca79f2009-05-27 21:56:52 +02001041
Andi Kleenb79109c2009-02-12 13:43:23 +01001042 if (!banks)
Andi Kleen32561692009-05-27 21:56:53 +02001043 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044
Hidetoshi Setob8325c52011-06-08 10:57:46 +09001045 mce_gather_info(&m, regs);
Andi Kleenb5f2fa42009-02-12 13:43:22 +01001046
Andi Kleen3c079792009-05-27 21:56:55 +02001047 final = &__get_cpu_var(mces_seen);
1048 *final = m;
1049
Tony Luck95022b82012-04-18 15:19:40 -07001050 memset(valid_banks, 0, sizeof(valid_banks));
Tony Luck61b0fcc2012-07-19 11:28:46 -07001051 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
Hidetoshi Seto680b6cf2009-08-26 16:20:36 +09001052
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053 barrier();
1054
Andi Kleen3c079792009-05-27 21:56:55 +02001055 /*
Tony Lucka8c321f2012-01-03 11:45:45 -08001056 * When no restart IP might need to kill or panic.
1057 * Assume the worst for now, but if we find the
1058 * severity is MCE_AR_SEVERITY we have other options.
Andi Kleened7290d2009-05-27 21:56:57 +02001059 */
1060 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1061 kill_it = 1;
1062
1063 /*
Andi Kleen3c079792009-05-27 21:56:55 +02001064 * Go through all the banks in exclusion of the other CPUs.
1065 * This way we don't report duplicated events on shared banks
1066 * because the first one to see it will clear it.
1067 */
Hidetoshi Seto7fb06fc2009-06-15 18:18:43 +09001068 order = mce_start(&no_way_out);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 for (i = 0; i < banks; i++) {
Andi Kleenb79109c2009-02-12 13:43:23 +01001070 __clear_bit(i, toclear);
Tony Luck95022b82012-04-18 15:19:40 -07001071 if (!test_bit(i, valid_banks))
1072 continue;
Andi Kleencebe1822009-07-09 00:31:43 +02001073 if (!mce_banks[i].ctl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 continue;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001075
1076 m.misc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077 m.addr = 0;
1078 m.bank = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079
Andi Kleena2d32bc2009-07-09 00:31:44 +02001080 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 if ((m.status & MCI_STATUS_VAL) == 0)
1082 continue;
1083
Andi Kleenb79109c2009-02-12 13:43:23 +01001084 /*
Andi Kleened7290d2009-05-27 21:56:57 +02001085 * Non uncorrected or non signaled errors are handled by
1086 * machine_check_poll. Leave them alone, unless this panics.
Andi Kleenb79109c2009-02-12 13:43:23 +01001087 */
Andi Kleened7290d2009-05-27 21:56:57 +02001088 if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1089 !no_way_out)
Andi Kleenb79109c2009-02-12 13:43:23 +01001090 continue;
1091
1092 /*
1093 * Set taint even when machine check was not enabled.
1094 */
1095 add_taint(TAINT_MACHINE_CHECK);
1096
Andi Kleened7290d2009-05-27 21:56:57 +02001097 severity = mce_severity(&m, tolerant, NULL);
Andi Kleenb79109c2009-02-12 13:43:23 +01001098
Andi Kleened7290d2009-05-27 21:56:57 +02001099 /*
1100 * When machine check was for corrected handler don't touch,
1101 * unless we're panicing.
1102 */
1103 if (severity == MCE_KEEP_SEVERITY && !no_way_out)
1104 continue;
1105 __set_bit(i, toclear);
1106 if (severity == MCE_NO_SEVERITY) {
Andi Kleenb79109c2009-02-12 13:43:23 +01001107 /*
1108 * Machine check event was not enabled. Clear, but
1109 * ignore.
1110 */
1111 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112 }
1113
Tony Luck85f926942011-12-13 09:48:13 -08001114 mce_read_aux(&m, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001116 /*
1117 * Action optional error. Queue address for later processing.
1118 * When the ring overflows we just ignore the AO error.
1119 * RED-PEN add some logging mechanism when
1120 * usable_address or mce_add_ring fails.
1121 * RED-PEN don't ignore overflow for tolerant == 0
1122 */
1123 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
1124 mce_ring_add(m.addr >> PAGE_SHIFT);
1125
Andi Kleenb79109c2009-02-12 13:43:23 +01001126 mce_log(&m);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
Andi Kleen3c079792009-05-27 21:56:55 +02001128 if (severity > worst) {
1129 *final = m;
1130 worst = severity;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 }
1133
Tony Lucka8c321f2012-01-03 11:45:45 -08001134 /* mce_clear_state will clear *final, save locally for use later */
1135 m = *final;
1136
Andi Kleen3c079792009-05-27 21:56:55 +02001137 if (!no_way_out)
1138 mce_clear_state(toclear);
1139
Ingo Molnare9eee032009-04-08 12:31:17 +02001140 /*
Andi Kleen3c079792009-05-27 21:56:55 +02001141 * Do most of the synchronization with other CPUs.
1142 * When there's any problem use only local no_way_out state.
Ingo Molnare9eee032009-04-08 12:31:17 +02001143 */
Andi Kleen3c079792009-05-27 21:56:55 +02001144 if (mce_end(order) < 0)
1145 no_way_out = worst >= MCE_PANIC_SEVERITY;
Tim Hockinbd784322007-07-21 17:10:37 +02001146
1147 /*
Tony Lucka8c321f2012-01-03 11:45:45 -08001148 * At insane "tolerant" levels we take no action. Otherwise
1149 * we only die if we have no other choice. For less serious
1150 * issues we try to recover, or limit damage to the current
1151 * process.
Tim Hockinbd784322007-07-21 17:10:37 +02001152 */
Tony Lucka8c321f2012-01-03 11:45:45 -08001153 if (tolerant < 3) {
1154 if (no_way_out)
1155 mce_panic("Fatal machine check on current CPU", &m, msg);
1156 if (worst == MCE_AR_SEVERITY) {
1157 /* schedule action before return to userland */
Tony Luckdad17432012-05-14 15:07:48 -07001158 mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
Tony Lucka8c321f2012-01-03 11:45:45 -08001159 set_thread_flag(TIF_MCE_NOTIFY);
1160 } else if (kill_it) {
1161 force_sig(SIGBUS, current);
1162 }
1163 }
Tim Hockine02e68d2007-07-21 17:10:36 +02001164
Andi Kleen3c079792009-05-27 21:56:55 +02001165 if (worst > 0)
1166 mce_report_event(regs);
Andi Kleen5f8c1a52009-04-29 19:29:12 +02001167 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
Andi Kleen32561692009-05-27 21:56:53 +02001168out:
Andi Kleen553f2652006-04-07 19:49:57 +02001169 atomic_dec(&mce_entry);
Andi Kleen88921be2009-05-27 21:56:51 +02001170 sync_core();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171}
Andi Kleenea149b32009-04-29 19:31:00 +02001172EXPORT_SYMBOL_GPL(do_machine_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173
Tony Luckcd42f4a2011-12-15 10:48:12 -08001174#ifndef CONFIG_MEMORY_FAILURE
1175int memory_failure(unsigned long pfn, int vector, int flags)
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001176{
Tony Lucka8c321f2012-01-03 11:45:45 -08001177 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1178 BUG_ON(flags & MF_ACTION_REQUIRED);
Joe Perchesc767a542012-05-21 19:50:07 -07001179 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1180 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1181 pfn);
Tony Luckcd42f4a2011-12-15 10:48:12 -08001182
1183 return 0;
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001184}
Tony Luckcd42f4a2011-12-15 10:48:12 -08001185#endif
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001186
1187/*
Tony Lucka8c321f2012-01-03 11:45:45 -08001188 * Called in process context that interrupted by MCE and marked with
1189 * TIF_MCE_NOTIFY, just before returning to erroneous userland.
1190 * This code is allowed to sleep.
1191 * Attempt possible recovery such as calling the high level VM handler to
1192 * process any corrupted pages, and kill/signal current process if required.
1193 * Action required errors are handled here.
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001194 */
1195void mce_notify_process(void)
1196{
1197 unsigned long pfn;
Tony Lucka8c321f2012-01-03 11:45:45 -08001198 struct mce_info *mi = mce_find_info();
1199
1200 if (!mi)
1201 mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
1202 pfn = mi->paddr >> PAGE_SHIFT;
1203
1204 clear_thread_flag(TIF_MCE_NOTIFY);
1205
1206 pr_err("Uncorrected hardware memory error in user-access at %llx",
1207 mi->paddr);
Tony Luckdad17432012-05-14 15:07:48 -07001208 /*
1209 * We must call memory_failure() here even if the current process is
1210 * doomed. We still need to mark the page as poisoned and alert any
1211 * other users of the page.
1212 */
1213 if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
1214 mi->restartable == 0) {
Tony Lucka8c321f2012-01-03 11:45:45 -08001215 pr_err("Memory error not recovered");
1216 force_sig(SIGBUS, current);
1217 }
1218 mce_clear_info(mi);
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001219}
1220
Tony Lucka8c321f2012-01-03 11:45:45 -08001221/*
1222 * Action optional processing happens here (picking up
1223 * from the list of faulting pages that do_machine_check()
1224 * placed into the "ring").
1225 */
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001226static void mce_process_work(struct work_struct *dummy)
1227{
Tony Lucka8c321f2012-01-03 11:45:45 -08001228 unsigned long pfn;
1229
1230 while (mce_ring_get(&pfn))
1231 memory_failure(pfn, MCE_VECTOR, 0);
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001232}
1233
Dmitriy Zavin15d5f832006-09-26 10:52:42 +02001234#ifdef CONFIG_X86_MCE_INTEL
1235/***
1236 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
Simon Arlott676b1852007-10-20 01:25:36 +02001237 * @cpu: The CPU on which the event occurred.
Dmitriy Zavin15d5f832006-09-26 10:52:42 +02001238 * @status: Event status information
1239 *
1240 * This function should be called by the thermal interrupt after the
1241 * event has been processed and the decision was made to log the event
1242 * further.
1243 *
1244 * The status parameter will be saved to the 'status' field of 'struct mce'
1245 * and historically has been the register value of the
1246 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1247 */
Andi Kleenb5f2fa42009-02-12 13:43:22 +01001248void mce_log_therm_throt_event(__u64 status)
Dmitriy Zavin15d5f832006-09-26 10:52:42 +02001249{
1250 struct mce m;
1251
Andi Kleenb5f2fa42009-02-12 13:43:22 +01001252 mce_setup(&m);
Dmitriy Zavin15d5f832006-09-26 10:52:42 +02001253 m.bank = MCE_THERMAL_BANK;
1254 m.status = status;
Dmitriy Zavin15d5f832006-09-26 10:52:42 +02001255 mce_log(&m);
1256}
1257#endif /* CONFIG_X86_MCE_INTEL */
1258
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259/*
Tim Hockin8a336b02007-05-02 19:27:19 +02001260 * Periodic polling timer for "silent" machine check errors. If the
1261 * poller finds an MCE, poll 2x faster. When the poller finds no more
1262 * errors, poll 2x slower (up to check_interval seconds).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 */
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001264static unsigned long check_interval = 5 * 60; /* 5 minutes */
Ingo Molnare9eee032009-04-08 12:31:17 +02001265
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001266static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
Andi Kleen52d168e2009-02-12 13:39:29 +01001267static DEFINE_PER_CPU(struct timer_list, mce_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001269static void mce_timer_fn(unsigned long data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270{
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001271 struct timer_list *t = &__get_cpu_var(mce_timer);
1272 unsigned long iv;
Andi Kleen52d168e2009-02-12 13:39:29 +01001273
1274 WARN_ON(smp_processor_id() != data);
1275
Tejun Heo7b543a52010-12-18 16:30:05 +01001276 if (mce_available(__this_cpu_ptr(&cpu_info))) {
Andi Kleenee031c32009-02-12 13:49:34 +01001277 machine_check_poll(MCP_TIMESTAMP,
1278 &__get_cpu_var(mce_poll_banks));
Ingo Molnare9eee032009-04-08 12:31:17 +02001279 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280
1281 /*
Tim Hockine02e68d2007-07-21 17:10:36 +02001282 * Alert userspace if needed. If we logged an MCE, reduce the
1283 * polling interval, otherwise increase the polling interval.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 */
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001285 iv = __this_cpu_read(mce_next_interval);
Andi Kleen9ff36ee2009-05-27 21:56:58 +02001286 if (mce_notify_irq())
Chen Gong958fb3c2012-06-05 10:35:02 +08001287 iv = max(iv / 2, (unsigned long) HZ/100);
Hidetoshi Seto14a02532009-04-30 16:04:51 +09001288 else
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001289 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1290 __this_cpu_write(mce_next_interval, iv);
Tim Hockin8a336b02007-05-02 19:27:19 +02001291
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001292 t->expires = jiffies + iv;
Hidetoshi Seto5be60662009-06-24 09:21:10 +09001293 add_timer_on(t, smp_processor_id());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294}
1295
Hidetoshi Seto9aaef962011-06-17 04:40:36 -04001296/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1297static void mce_timer_delete_all(void)
1298{
1299 int cpu;
1300
1301 for_each_online_cpu(cpu)
1302 del_timer_sync(&per_cpu(mce_timer, cpu));
1303}
1304
Andi Kleen9bd98402009-02-12 13:39:28 +01001305static void mce_do_trigger(struct work_struct *work)
1306{
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09001307 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
Andi Kleen9bd98402009-02-12 13:39:28 +01001308}
1309
1310static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1311
Tim Hockine02e68d2007-07-21 17:10:36 +02001312/*
Andi Kleen9bd98402009-02-12 13:39:28 +01001313 * Notify the user(s) about new machine check events.
1314 * Can be called from interrupt context, but not from machine check/NMI
1315 * context.
Tim Hockine02e68d2007-07-21 17:10:36 +02001316 */
Andi Kleen9ff36ee2009-05-27 21:56:58 +02001317int mce_notify_irq(void)
Tim Hockine02e68d2007-07-21 17:10:36 +02001318{
Andi Kleen8457c842009-02-12 13:49:33 +01001319 /* Not more than two messages every minute */
1320 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1321
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09001322 if (test_and_clear_bit(0, &mce_need_notify)) {
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001323 /* wake processes polling /dev/mcelog */
1324 wake_up_interruptible(&mce_chrdev_wait);
Andi Kleen9bd98402009-02-12 13:39:28 +01001325
1326 /*
1327 * There is no risk of missing notifications because
1328 * work_pending is always cleared before the function is
1329 * executed.
1330 */
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09001331 if (mce_helper[0] && !work_pending(&mce_trigger_work))
Andi Kleen9bd98402009-02-12 13:39:28 +01001332 schedule_work(&mce_trigger_work);
Tim Hockine02e68d2007-07-21 17:10:36 +02001333
Andi Kleen8457c842009-02-12 13:49:33 +01001334 if (__ratelimit(&ratelimit))
Huang Yinga2d7b0d2010-06-08 14:35:39 +08001335 pr_info(HW_ERR "Machine check events logged\n");
Tim Hockine02e68d2007-07-21 17:10:36 +02001336
1337 return 1;
1338 }
1339 return 0;
1340}
Andi Kleen9ff36ee2009-05-27 21:56:58 +02001341EXPORT_SYMBOL_GPL(mce_notify_irq);
Tim Hockine02e68d2007-07-21 17:10:36 +02001342
Hidetoshi Setocffd3772009-11-12 15:52:40 +09001343static int __cpuinit __mcheck_cpu_mce_banks_init(void)
Andi Kleencebe1822009-07-09 00:31:43 +02001344{
1345 int i;
1346
1347 mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
1348 if (!mce_banks)
1349 return -ENOMEM;
1350 for (i = 0; i < banks; i++) {
1351 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02001352
Andi Kleencebe1822009-07-09 00:31:43 +02001353 b->ctl = -1ULL;
1354 b->init = 1;
1355 }
1356 return 0;
1357}
1358
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001359/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 * Initialize Machine Checks for a CPU.
1361 */
Borislav Petkov5e099542009-10-16 12:31:32 +02001362static int __cpuinit __mcheck_cpu_cap_init(void)
Andi Kleen0d7482e32009-02-17 23:07:13 +01001363{
Andi Kleen0d7482e32009-02-17 23:07:13 +01001364 unsigned b;
Ingo Molnare9eee032009-04-08 12:31:17 +02001365 u64 cap;
Andi Kleen0d7482e32009-02-17 23:07:13 +01001366
1367 rdmsrl(MSR_IA32_MCG_CAP, cap);
Thomas Gleixner01c66802009-04-08 12:31:24 +02001368
1369 b = cap & MCG_BANKCNT_MASK;
Roland Dreier93ae5012009-10-15 14:21:14 -07001370 if (!banks)
Joe Perchesc767a542012-05-21 19:50:07 -07001371 pr_info("CPU supports %d MCE banks\n", b);
Ingo Molnarb6592942009-04-08 12:31:27 +02001372
Andi Kleen0d7482e32009-02-17 23:07:13 +01001373 if (b > MAX_NR_BANKS) {
Joe Perchesc767a542012-05-21 19:50:07 -07001374 pr_warn("Using only %u machine check banks out of %u\n",
Andi Kleen0d7482e32009-02-17 23:07:13 +01001375 MAX_NR_BANKS, b);
1376 b = MAX_NR_BANKS;
1377 }
1378
1379 /* Don't support asymmetric configurations today */
1380 WARN_ON(banks != 0 && b != banks);
1381 banks = b;
Andi Kleencebe1822009-07-09 00:31:43 +02001382 if (!mce_banks) {
Hidetoshi Setocffd3772009-11-12 15:52:40 +09001383 int err = __mcheck_cpu_mce_banks_init();
Ingo Molnar11868a22009-09-23 17:49:55 +02001384
Andi Kleencebe1822009-07-09 00:31:43 +02001385 if (err)
1386 return err;
Andi Kleen0d7482e32009-02-17 23:07:13 +01001387 }
1388
1389 /* Use accurate RIP reporting if available. */
Thomas Gleixner01c66802009-04-08 12:31:24 +02001390 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
Andi Kleen0d7482e32009-02-17 23:07:13 +01001391 rip_msr = MSR_IA32_MCG_EIP;
1392
Andi Kleened7290d2009-05-27 21:56:57 +02001393 if (cap & MCG_SER_P)
1394 mce_ser = 1;
1395
Andi Kleen0d7482e32009-02-17 23:07:13 +01001396 return 0;
1397}
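/*
 * Illustrative MCG_CAP decode (made-up value): cap = 0x105 would mean
 * 5 banks (low byte), MCG_CTL_P (bit 8) set so MCG_CTL gets programmed
 * in __mcheck_cpu_init_generic(), and MCG_EXT_P/MCG_SER_P clear, so
 * rip_msr stays 0 and mce_ser stays 0 (no software error recovery).
 */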
1398
Borislav Petkov5e099542009-10-16 12:31:32 +02001399static void __mcheck_cpu_init_generic(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400{
Ingo Molnare9eee032009-04-08 12:31:17 +02001401 mce_banks_t all_banks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 u64 cap;
1403 int i;
1404
Andi Kleenb79109c2009-02-12 13:43:23 +01001405 /*
1406 * Log the machine checks left over from the previous reset.
1407 */
Andi Kleenee031c32009-02-12 13:49:34 +01001408 bitmap_fill(all_banks, MAX_NR_BANKS);
Andi Kleen5679af42009-04-07 17:06:55 +02001409 machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
1411 set_in_cr4(X86_CR4_MCE);
1412
Andi Kleen0d7482e32009-02-17 23:07:13 +01001413 rdmsrl(MSR_IA32_MCG_CAP, cap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 if (cap & MCG_CTL_P)
1415 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1416
1417 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02001418 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02001419
Andi Kleencebe1822009-07-09 00:31:43 +02001420 if (!b->init)
Andi Kleen06b7a7a2009-04-27 18:37:43 +02001421 continue;
Andi Kleena2d32bc2009-07-09 00:31:44 +02001422 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1423 wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001424 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425}
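/*
 * Summary of the generic bring-up: poll (and optionally log) whatever the
 * BIOS or a previous kernel left in the banks, turn on CR4.MCE, enable
 * MCG_CTL if the CPU has one, then for every bank that survived the
 * quirks write its control mask (usually all 1s, i.e. report every error
 * type) and clear any stale status.
 */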
1426
Tony Luck61b0fcc2012-07-19 11:28:46 -07001427/*
1428 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1429 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1430 * Vol 3B Table 15-20). But this confuses both the code that determines
1431 * whether the machine check occurred in kernel or user mode, and also
1432 * the severity assessment code. Pretend that EIPV was set, and take the
1433 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1434 */
1435static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1436{
1437 if (bank != 0)
1438 return;
1439 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1440 return;
1441 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1442 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1443 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1444 MCACOD)) !=
1445 (MCI_STATUS_UC|MCI_STATUS_EN|
1446 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1447 MCI_STATUS_AR|MCACOD_INSTR))
1448 return;
1449
1450 m->mcgstatus |= MCG_STATUS_EIPV;
1451 m->ip = regs->ip;
1452 m->cs = regs->cs;
1453}
1454
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455/* Add per CPU specific workarounds here */
Borislav Petkov5e099542009-10-16 12:31:32 +02001456static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001457{
Ingo Molnare412cd22009-08-17 10:19:00 +02001458 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
Joe Perchesc767a542012-05-21 19:50:07 -07001459 pr_info("unknown CPU type - not enabling MCE support\n");
Ingo Molnare412cd22009-08-17 10:19:00 +02001460 return -EOPNOTSUPP;
1461 }
1462
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 /* This should be disabled by the BIOS, but isn't always */
Jan Beulich911f6a72008-04-22 16:22:21 +01001464 if (c->x86_vendor == X86_VENDOR_AMD) {
Ingo Molnare9eee032009-04-08 12:31:17 +02001465 if (c->x86 == 15 && banks > 4) {
1466 /*
1467 * disable GART TBL walk error reporting, which
1468 * trips off incorrectly with the IOMMU & 3ware
1469 * & Cerberus:
1470 */
Andi Kleencebe1822009-07-09 00:31:43 +02001471 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
Ingo Molnare9eee032009-04-08 12:31:17 +02001472 }
1473 if (c->x86 <= 17 && mce_bootlog < 0) {
1474 /*
 1475 * Lots of broken BIOSes around that don't clear them
1476 * by default and leave crap in there. Don't log:
1477 */
Jan Beulich911f6a72008-04-22 16:22:21 +01001478 mce_bootlog = 0;
Ingo Molnare9eee032009-04-08 12:31:17 +02001479 }
Andi Kleen2e6f6942009-04-27 18:42:48 +02001480 /*
1481 * Various K7s with broken bank 0 around. Always disable
1482 * by default.
1483 */
Andi Kleen203abd62009-06-15 14:52:01 +02001484 if (c->x86 == 6 && banks > 0)
Andi Kleencebe1822009-07-09 00:31:43 +02001485 mce_banks[0].ctl = 0;
Borislav Petkov575203b2012-04-20 18:01:34 +02001486
1487 /*
1488 * Turn off MC4_MISC thresholding banks on those models since
1489 * they're not supported there.
1490 */
1491 if (c->x86 == 0x15 &&
1492 (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1493 int i;
1494 u64 val, hwcr;
1495 bool need_toggle;
1496 u32 msrs[] = {
1497 0x00000413, /* MC4_MISC0 */
1498 0xc0000408, /* MC4_MISC1 */
1499 };
1500
1501 rdmsrl(MSR_K7_HWCR, hwcr);
1502
1503 /* McStatusWrEn has to be set */
1504 need_toggle = !(hwcr & BIT(18));
1505
1506 if (need_toggle)
1507 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
1508
1509 for (i = 0; i < ARRAY_SIZE(msrs); i++) {
1510 rdmsrl(msrs[i], val);
1511
1512 /* CntP bit set? */
Borislav Petkov80f0336102012-05-22 12:53:46 +02001513 if (val & BIT_64(62)) {
1514 val &= ~BIT_64(62);
1515 wrmsrl(msrs[i], val);
Borislav Petkov575203b2012-04-20 18:01:34 +02001516 }
1517 }
1518
1519 /* restore old settings */
1520 if (need_toggle)
1521 wrmsrl(MSR_K7_HWCR, hwcr);
1522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 }
Andi Kleene5835382005-11-05 17:25:54 +01001524
Andi Kleen06b7a7a2009-04-27 18:37:43 +02001525 if (c->x86_vendor == X86_VENDOR_INTEL) {
1526 /*
1527 * SDM documents that on family 6 bank 0 should not be written
 1528 * because it aliases to another special BIOS-controlled
 1529 * register.
 1530 * But it's not aliased anymore on model 0x1a+.
1531 * Don't ignore bank 0 completely because there could be a
1532 * valid event later, merely don't write CTL0.
1533 */
1534
Andi Kleencebe1822009-07-09 00:31:43 +02001535 if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
1536 mce_banks[0].init = 0;
Andi Kleen3c079792009-05-27 21:56:55 +02001537
1538 /*
1539 * All newer Intel systems support MCE broadcasting. Enable
1540 * synchronization with a one second timeout.
1541 */
1542 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1543 monarch_timeout < 0)
1544 monarch_timeout = USEC_PER_SEC;
Bartlomiej Zolnierkiewiczc7f6fa42009-07-28 23:52:54 +02001545
Ingo Molnare412cd22009-08-17 10:19:00 +02001546 /*
1547 * There are also broken BIOSes on some Pentium M and
1548 * earlier systems:
1549 */
1550 if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
Bartlomiej Zolnierkiewiczc7f6fa42009-07-28 23:52:54 +02001551 mce_bootlog = 0;
Tony Luck61b0fcc2012-07-19 11:28:46 -07001552
1553 if (c->x86 == 6 && c->x86_model == 45)
1554 quirk_no_way_out = quirk_sandybridge_ifu;
Andi Kleen06b7a7a2009-04-27 18:37:43 +02001555 }
Andi Kleen3c079792009-05-27 21:56:55 +02001556 if (monarch_timeout < 0)
1557 monarch_timeout = 0;
Andi Kleen29b0f592009-05-27 21:56:56 +02001558 if (mce_bootlog != 0)
1559 mce_panic_timeout = 30;
Ingo Molnare412cd22009-08-17 10:19:00 +02001560
1561 return 0;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001562}
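/*
 * The family 0x15 block above deserves a note: MC4_MISC0/MC4_MISC1 carry
 * a CntP bit (62) that flags a thresholding counter in that register.
 * On models 0x10-0x1f the counter is not actually supported, so the bit
 * is cleared; writes to those MSRs are only accepted while McStatusWrEn
 * (bit 18 of HWCR) is set, hence the temporary toggle around the loop.
 */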
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
Hidetoshi Seto3a97fc32011-06-08 10:58:35 +09001564static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
Andi Kleen4efc0672009-04-28 19:07:31 +02001565{
1566 if (c->x86 != 5)
Hidetoshi Seto3a97fc32011-06-08 10:58:35 +09001567 return 0;
1568
Andi Kleen4efc0672009-04-28 19:07:31 +02001569 switch (c->x86_vendor) {
1570 case X86_VENDOR_INTEL:
Hidetoshi Setoc6978362009-06-15 17:22:49 +09001571 intel_p5_mcheck_init(c);
Hidetoshi Seto3a97fc32011-06-08 10:58:35 +09001572 return 1;
Andi Kleen4efc0672009-04-28 19:07:31 +02001573 break;
1574 case X86_VENDOR_CENTAUR:
1575 winchip_mcheck_init(c);
Hidetoshi Seto3a97fc32011-06-08 10:58:35 +09001576 return 1;
Andi Kleen4efc0672009-04-28 19:07:31 +02001577 break;
1578 }
Hidetoshi Seto3a97fc32011-06-08 10:58:35 +09001579
1580 return 0;
Andi Kleen4efc0672009-04-28 19:07:31 +02001581}
1582
Borislav Petkov5e099542009-10-16 12:31:32 +02001583static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584{
1585 switch (c->x86_vendor) {
1586 case X86_VENDOR_INTEL:
1587 mce_intel_feature_init(c);
1588 break;
Jacob Shin89b831e2005-11-05 17:25:53 +01001589 case X86_VENDOR_AMD:
1590 mce_amd_feature_init(c);
1591 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 default:
1593 break;
1594 }
1595}
1596
Borislav Petkov5e099542009-10-16 12:31:32 +02001597static void __mcheck_cpu_init_timer(void)
Andi Kleen52d168e2009-02-12 13:39:29 +01001598{
1599 struct timer_list *t = &__get_cpu_var(mce_timer);
Thomas Gleixner1a87fc12012-06-06 11:33:21 +02001600 unsigned long iv = check_interval * HZ;
Andi Kleen52d168e2009-02-12 13:39:29 +01001601
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001602 setup_timer(t, mce_timer_fn, smp_processor_id());
Jan Beulichbc09eff2009-12-08 11:21:37 +09001603
Hidetoshi Seto62fdac52009-06-11 16:06:07 +09001604 if (mce_ignore_ce)
1605 return;
1606
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001607 __this_cpu_write(mce_next_interval, iv);
1608 if (!iv)
Andi Kleen52d168e2009-02-12 13:39:29 +01001609 return;
Thomas Gleixner82f7af02012-05-24 17:54:51 +00001610 t->expires = round_jiffies(jiffies + iv);
Hidetoshi Seto5be60662009-06-24 09:21:10 +09001611 add_timer_on(t, smp_processor_id());
Andi Kleen52d168e2009-02-12 13:39:29 +01001612}
1613
Andi Kleen9eda8cb2009-07-09 00:31:42 +02001614/* Handle unconfigured int18 (should never happen) */
1615static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1616{
Joe Perchesc767a542012-05-21 19:50:07 -07001617 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
Andi Kleen9eda8cb2009-07-09 00:31:42 +02001618 smp_processor_id());
1619}
1620
1621/* Call the installed machine check handler for this CPU setup. */
1622void (*machine_check_vector)(struct pt_regs *, long error_code) =
1623 unexpected_machine_check;
1624
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001625/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 * Called for each booted CPU to set up machine checks.
Ingo Molnare9eee032009-04-08 12:31:17 +02001627 * Must be called with preempt off:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 */
Borislav Petkov5e099542009-10-16 12:31:32 +02001629void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630{
Andi Kleen4efc0672009-04-28 19:07:31 +02001631 if (mce_disabled)
1632 return;
1633
Hidetoshi Seto3a97fc32011-06-08 10:58:35 +09001634 if (__mcheck_cpu_ancient_init(c))
1635 return;
Andi Kleen4efc0672009-04-28 19:07:31 +02001636
Andi Kleen5b4408f2009-02-12 13:39:30 +01001637 if (!mce_available(c))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 return;
1639
Borislav Petkov5e099542009-10-16 12:31:32 +02001640 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
Andi Kleen04b2b1a2009-04-28 22:50:19 +02001641 mce_disabled = 1;
Andi Kleen0d7482e32009-02-17 23:07:13 +01001642 return;
1643 }
Andi Kleen0d7482e32009-02-17 23:07:13 +01001644
Andi Kleen5d727922009-04-27 19:25:48 +02001645 machine_check_vector = do_machine_check;
1646
Borislav Petkov5e099542009-10-16 12:31:32 +02001647 __mcheck_cpu_init_generic();
1648 __mcheck_cpu_init_vendor(c);
1649 __mcheck_cpu_init_timer();
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001650 INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
Hidetoshi Setob77e70b2011-06-08 10:56:02 +09001651 init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652}
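/*
 * Boot-time flow for each CPU, in order: bail out if MCE is disabled,
 * hand P5/WinChip class parts to the ancient handlers, read MCG_CAP,
 * apply vendor quirks (either step may veto MCE entirely), install
 * do_machine_check as the #MC vector, program the banks, run vendor
 * feature setup (CMCI, thresholding), start the poll timer and set up
 * the deferred-work plumbing used from machine check context.
 */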
1653
1654/*
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001655 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 */
1657
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001658static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1659static int mce_chrdev_open_count; /* #times opened */
1660static int mce_chrdev_open_exclu; /* already open exclusive? */
Tim Hockinf528e7b2007-07-21 17:10:35 +02001661
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001662static int mce_chrdev_open(struct inode *inode, struct file *file)
Tim Hockinf528e7b2007-07-21 17:10:35 +02001663{
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001664 spin_lock(&mce_chrdev_state_lock);
Tim Hockinf528e7b2007-07-21 17:10:35 +02001665
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001666 if (mce_chrdev_open_exclu ||
1667 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1668 spin_unlock(&mce_chrdev_state_lock);
Ingo Molnare9eee032009-04-08 12:31:17 +02001669
Tim Hockinf528e7b2007-07-21 17:10:35 +02001670 return -EBUSY;
1671 }
1672
1673 if (file->f_flags & O_EXCL)
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001674 mce_chrdev_open_exclu = 1;
1675 mce_chrdev_open_count++;
Tim Hockinf528e7b2007-07-21 17:10:35 +02001676
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001677 spin_unlock(&mce_chrdev_state_lock);
Tim Hockinf528e7b2007-07-21 17:10:35 +02001678
Tim Hockinbd784322007-07-21 17:10:37 +02001679 return nonseekable_open(inode, file);
Tim Hockinf528e7b2007-07-21 17:10:35 +02001680}
1681
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001682static int mce_chrdev_release(struct inode *inode, struct file *file)
Tim Hockinf528e7b2007-07-21 17:10:35 +02001683{
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001684 spin_lock(&mce_chrdev_state_lock);
Tim Hockinf528e7b2007-07-21 17:10:35 +02001685
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001686 mce_chrdev_open_count--;
1687 mce_chrdev_open_exclu = 0;
Tim Hockinf528e7b2007-07-21 17:10:35 +02001688
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001689 spin_unlock(&mce_chrdev_state_lock);
Tim Hockinf528e7b2007-07-21 17:10:35 +02001690
1691 return 0;
1692}
1693
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001694static void collect_tscs(void *data)
1695{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 unsigned long *cpu_tsc = (unsigned long *)data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001698 rdtscll(cpu_tsc[smp_processor_id()]);
1699}
1700
Huang Ying482908b2010-05-18 14:35:22 +08001701static int mce_apei_read_done;
1702
1703/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1704static int __mce_read_apei(char __user **ubuf, size_t usize)
1705{
1706 int rc;
1707 u64 record_id;
1708 struct mce m;
1709
1710 if (usize < sizeof(struct mce))
1711 return -EINVAL;
1712
1713 rc = apei_read_mce(&m, &record_id);
1714 /* Error or no more MCE record */
1715 if (rc <= 0) {
1716 mce_apei_read_done = 1;
Naoya Horiguchifadd85f2012-01-23 15:54:52 -05001717 /*
1718 * When ERST is disabled, mce_chrdev_read() should return
1719 * "no record" instead of "no device."
1720 */
1721 if (rc == -ENODEV)
1722 return 0;
Huang Ying482908b2010-05-18 14:35:22 +08001723 return rc;
1724 }
1725 rc = -EFAULT;
1726 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1727 return rc;
1728 /*
 1729 * Ideally the record should only be cleared after it has
 1730 * been flushed to disk or sent out over the network by
 1731 * /sbin/mcelog, but there is no interface for that yet,
 1732 * so just clear it here to avoid duplication.
1733 */
1734 rc = apei_clear_mce(record_id);
1735 if (rc) {
1736 mce_apei_read_done = 1;
1737 return rc;
1738 }
1739 *ubuf += sizeof(struct mce);
1740
1741 return 0;
1742}
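/*
 * Each call pulls at most one record out of persistent storage, copies
 * it to the user buffer and advances *ubuf; a return of 0 with an
 * unchanged buffer pointer means ERST had nothing (or is absent).  A
 * read or clear failure latches mce_apei_read_done so subsequent reads
 * fall back to the normal in-memory log.
 */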
1743
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001744static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1745 size_t usize, loff_t *off)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 char __user *buf = ubuf;
Ingo Molnare9eee032009-04-08 12:31:17 +02001748 unsigned long *cpu_tsc;
1749 unsigned prev, next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 int i, err;
1751
Mike Travis6bca67f2008-07-18 18:11:27 -07001752 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
Andi Kleenf0de53b2005-04-16 15:25:10 -07001753 if (!cpu_tsc)
1754 return -ENOMEM;
1755
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001756 mutex_lock(&mce_chrdev_read_mutex);
Huang Ying482908b2010-05-18 14:35:22 +08001757
1758 if (!mce_apei_read_done) {
1759 err = __mce_read_apei(&buf, usize);
1760 if (err || buf != ubuf)
1761 goto out;
1762 }
1763
Paul E. McKenneyf56e8a02010-03-05 15:03:27 -08001764 next = rcu_dereference_check_mce(mcelog.next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765
1766 /* Only supports full reads right now */
Huang Ying482908b2010-05-18 14:35:22 +08001767 err = -EINVAL;
1768 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1769 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771 err = 0;
Huang Yingef41df4342009-02-12 13:39:34 +01001772 prev = 0;
1773 do {
1774 for (i = prev; i < next; i++) {
1775 unsigned long start = jiffies;
Hidetoshi Seto559faa62011-06-08 11:00:08 +09001776 struct mce *m = &mcelog.entry[i];
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001777
Hidetoshi Seto559faa62011-06-08 11:00:08 +09001778 while (!m->finished) {
Huang Yingef41df4342009-02-12 13:39:34 +01001779 if (time_after_eq(jiffies, start + 2)) {
Hidetoshi Seto559faa62011-06-08 11:00:08 +09001780 memset(m, 0, sizeof(*m));
Huang Yingef41df4342009-02-12 13:39:34 +01001781 goto timeout;
1782 }
1783 cpu_relax();
Andi Kleen673242c2005-09-12 18:49:24 +02001784 }
Huang Yingef41df4342009-02-12 13:39:34 +01001785 smp_rmb();
Hidetoshi Seto559faa62011-06-08 11:00:08 +09001786 err |= copy_to_user(buf, m, sizeof(*m));
1787 buf += sizeof(*m);
Huang Yingef41df4342009-02-12 13:39:34 +01001788timeout:
1789 ;
Andi Kleen673242c2005-09-12 18:49:24 +02001790 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791
Huang Yingef41df4342009-02-12 13:39:34 +01001792 memset(mcelog.entry + prev, 0,
1793 (next - prev) * sizeof(struct mce));
1794 prev = next;
1795 next = cmpxchg(&mcelog.next, prev, 0);
1796 } while (next != prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797
Paul E. McKenneyb2b18662005-06-25 14:55:38 -07001798 synchronize_sched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001800 /*
1801 * Collect entries that were still getting written before the
1802 * synchronize.
1803 */
Jens Axboe15c8b6c2008-05-09 09:39:44 +02001804 on_each_cpu(collect_tscs, cpu_tsc, 1);
Ingo Molnare9eee032009-04-08 12:31:17 +02001805
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001806 for (i = next; i < MCE_LOG_LEN; i++) {
Hidetoshi Seto559faa62011-06-08 11:00:08 +09001807 struct mce *m = &mcelog.entry[i];
1808
1809 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1810 err |= copy_to_user(buf, m, sizeof(*m));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 smp_rmb();
Hidetoshi Seto559faa62011-06-08 11:00:08 +09001812 buf += sizeof(*m);
1813 memset(m, 0, sizeof(*m));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 }
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001815 }
Huang Ying482908b2010-05-18 14:35:22 +08001816
1817 if (err)
1818 err = -EFAULT;
1819
1820out:
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001821 mutex_unlock(&mce_chrdev_read_mutex);
Andi Kleenf0de53b2005-04-16 15:25:10 -07001822 kfree(cpu_tsc);
Ingo Molnare9eee032009-04-08 12:31:17 +02001823
Huang Ying482908b2010-05-18 14:35:22 +08001824 return err ? err : buf - ubuf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825}
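/*
 * The read is two-phase: first drain entries [0, mcelog.next), spinning
 * briefly for writers that have reserved a slot but not set ->finished
 * yet, and atomically reset mcelog.next; then, after synchronize_sched()
 * and a TSC snapshot from every CPU, sweep the rest of the buffer for
 * stragglers whose timestamp predates that snapshot.  Partial reads are
 * rejected, which is why callers must supply a full-log-sized buffer.
 */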
1826
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001827static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
Tim Hockine02e68d2007-07-21 17:10:36 +02001828{
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001829 poll_wait(file, &mce_chrdev_wait, wait);
Paul E. McKenneya4dd9922011-04-01 07:15:14 -07001830 if (rcu_access_index(mcelog.next))
Tim Hockine02e68d2007-07-21 17:10:36 +02001831 return POLLIN | POLLRDNORM;
Huang Ying482908b2010-05-18 14:35:22 +08001832 if (!mce_apei_read_done && apei_check_mce())
1833 return POLLIN | POLLRDNORM;
Tim Hockine02e68d2007-07-21 17:10:36 +02001834 return 0;
1835}
1836
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001837static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1838 unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839{
1840 int __user *p = (int __user *)arg;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001841
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 if (!capable(CAP_SYS_ADMIN))
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001843 return -EPERM;
Ingo Molnare9eee032009-04-08 12:31:17 +02001844
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 switch (cmd) {
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001846 case MCE_GET_RECORD_LEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 return put_user(sizeof(struct mce), p);
1848 case MCE_GET_LOG_LEN:
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001849 return put_user(MCE_LOG_LEN, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 case MCE_GETCLEAR_FLAGS: {
1851 unsigned flags;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001852
1853 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 flags = mcelog.flags;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001855 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
Ingo Molnare9eee032009-04-08 12:31:17 +02001856
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001857 return put_user(flags, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 }
1859 default:
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001860 return -ENOTTY;
1861 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862}
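/*
 * Sketch of the userspace side (illustrative only, not part of this
 * file; assumes the MCE_* ioctl numbers are picked up from the exported
 * asm/mce.h or a local copy, as mcelog does):
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int reclen, loglen;
 *
 *	ioctl(fd, MCE_GET_RECORD_LEN, &reclen);
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *	buf = malloc(reclen * loglen);
 *	n = read(fd, buf, reclen * loglen);	(a full-log read, see above)
 *
 * The ioctls require CAP_SYS_ADMIN, as enforced here.
 */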
1863
Luck, Tony66f5ddf2011-11-03 11:46:47 -07001864static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1865 size_t usize, loff_t *off);
1866
1867void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1868 const char __user *ubuf,
1869 size_t usize, loff_t *off))
1870{
1871 mce_write = fn;
1872}
1873EXPORT_SYMBOL_GPL(register_mce_write_callback);
1874
1875ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1876 size_t usize, loff_t *off)
1877{
1878 if (mce_write)
1879 return mce_write(filp, ubuf, usize, off);
1880 else
1881 return -EINVAL;
1882}
1883
1884static const struct file_operations mce_chrdev_ops = {
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001885 .open = mce_chrdev_open,
1886 .release = mce_chrdev_release,
1887 .read = mce_chrdev_read,
Luck, Tony66f5ddf2011-11-03 11:46:47 -07001888 .write = mce_chrdev_write,
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001889 .poll = mce_chrdev_poll,
1890 .unlocked_ioctl = mce_chrdev_ioctl,
1891 .llseek = no_llseek,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892};
1893
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09001894static struct miscdevice mce_chrdev_device = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 MISC_MCELOG_MINOR,
1896 "mcelog",
1897 &mce_chrdev_ops,
1898};
1899
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001900/*
Hidetoshi Seto62fdac52009-06-11 16:06:07 +09001901 * mce=off Disables machine check
1902 * mce=no_cmci Disables CMCI
1903 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1904 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
Andi Kleen3c079792009-05-27 21:56:55 +02001905 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1906 * monarchtimeout is how long to wait for other CPUs on machine
1907 * check, or 0 to not wait
Hidetoshi Seto13503fa2009-03-26 17:39:20 +09001908 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
1909 * mce=nobootlog Don't log MCEs from before booting.
1910 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911static int __init mcheck_enable(char *str)
1912{
Bartlomiej Zolnierkiewicze3346fc2009-07-28 23:55:09 +02001913 if (*str == 0) {
Andi Kleen4efc0672009-04-28 19:07:31 +02001914 enable_p5_mce();
Bartlomiej Zolnierkiewicze3346fc2009-07-28 23:55:09 +02001915 return 1;
1916 }
Andi Kleen4efc0672009-04-28 19:07:31 +02001917 if (*str == '=')
1918 str++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 if (!strcmp(str, "off"))
Andi Kleen04b2b1a2009-04-28 22:50:19 +02001920 mce_disabled = 1;
Hidetoshi Seto62fdac52009-06-11 16:06:07 +09001921 else if (!strcmp(str, "no_cmci"))
1922 mce_cmci_disabled = 1;
1923 else if (!strcmp(str, "dont_log_ce"))
1924 mce_dont_log_ce = 1;
1925 else if (!strcmp(str, "ignore_ce"))
1926 mce_ignore_ce = 1;
Hidetoshi Seto13503fa2009-03-26 17:39:20 +09001927 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
1928 mce_bootlog = (str[0] == 'b');
Andi Kleen3c079792009-05-27 21:56:55 +02001929 else if (isdigit(str[0])) {
Andi Kleen8c566ef2005-09-12 18:49:24 +02001930 get_option(&str, &tolerant);
Andi Kleen3c079792009-05-27 21:56:55 +02001931 if (*str == ',') {
1932 ++str;
1933 get_option(&str, &monarch_timeout);
1934 }
1935 } else {
Joe Perchesc767a542012-05-21 19:50:07 -07001936 pr_info("mce argument %s ignored. Please use /sys\n", str);
Hidetoshi Seto13503fa2009-03-26 17:39:20 +09001937 return 0;
1938 }
OGAWA Hirofumi9b410462006-03-31 02:30:33 -08001939 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940}
Andi Kleen4efc0672009-04-28 19:07:31 +02001941__setup("mce", mcheck_enable);
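/*
 * Examples of the above (values are only illustrative):
 *   mce=off          disable machine checks completely
 *   mce=bootlog      log MCEs left over from before boot, even on AMD
 *   mce=2,500000     tolerant=2 plus a 500000us (0.5s) monarch timeout,
 *                    the microsecond unit following from the
 *                    USEC_PER_SEC default set in the quirks code
 */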
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Yong Wanga2202aa2009-11-10 09:38:24 +08001943int __init mcheck_init(void)
Borislav Petkovb33a6362009-10-16 12:31:33 +02001944{
Yong Wanga2202aa2009-11-10 09:38:24 +08001945 mcheck_intel_therm_init();
1946
Borislav Petkovb33a6362009-10-16 12:31:33 +02001947 return 0;
1948}
Borislav Petkovb33a6362009-10-16 12:31:33 +02001949
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001950/*
Hidetoshi Setoc7cece82011-06-08 11:02:03 +09001951 * mce_syscore: PM support
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001952 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
Andi Kleen973a2dd2009-02-12 13:39:32 +01001954/*
1955 * Disable machine checks on suspend and shutdown. We can't really handle
1956 * them later.
1957 */
Borislav Petkov5e099542009-10-16 12:31:32 +02001958static int mce_disable_error_reporting(void)
Andi Kleen973a2dd2009-02-12 13:39:32 +01001959{
1960 int i;
1961
Andi Kleen06b7a7a2009-04-27 18:37:43 +02001962 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02001963 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02001964
Andi Kleencebe1822009-07-09 00:31:43 +02001965 if (b->init)
Andi Kleena2d32bc2009-07-09 00:31:44 +02001966 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
Andi Kleen06b7a7a2009-04-27 18:37:43 +02001967 }
Andi Kleen973a2dd2009-02-12 13:39:32 +01001968 return 0;
1969}
1970
Hidetoshi Setoc7cece82011-06-08 11:02:03 +09001971static int mce_syscore_suspend(void)
Andi Kleen973a2dd2009-02-12 13:39:32 +01001972{
Borislav Petkov5e099542009-10-16 12:31:32 +02001973 return mce_disable_error_reporting();
Andi Kleen973a2dd2009-02-12 13:39:32 +01001974}
1975
Hidetoshi Setoc7cece82011-06-08 11:02:03 +09001976static void mce_syscore_shutdown(void)
Andi Kleen973a2dd2009-02-12 13:39:32 +01001977{
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01001978 mce_disable_error_reporting();
Andi Kleen973a2dd2009-02-12 13:39:32 +01001979}
1980
Ingo Molnare9eee032009-04-08 12:31:17 +02001981/*
1982 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
1983 * Only one CPU is active at this time, the others get re-added later using
1984 * CPU hotplug:
1985 */
Hidetoshi Setoc7cece82011-06-08 11:02:03 +09001986static void mce_syscore_resume(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987{
Borislav Petkov5e099542009-10-16 12:31:32 +02001988 __mcheck_cpu_init_generic();
Tejun Heo7b543a52010-12-18 16:30:05 +01001989 __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990}
1991
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01001992static struct syscore_ops mce_syscore_ops = {
Hidetoshi Setoc7cece82011-06-08 11:02:03 +09001993 .suspend = mce_syscore_suspend,
1994 .shutdown = mce_syscore_shutdown,
1995 .resume = mce_syscore_resume,
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01001996};
1997
Hidetoshi Setoc7cece82011-06-08 11:02:03 +09001998/*
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001999 * mce_device: Sysfs support
Hidetoshi Setoc7cece82011-06-08 11:02:03 +09002000 */
2001
Andi Kleen52d168e2009-02-12 13:39:29 +01002002static void mce_cpu_restart(void *data)
2003{
Tejun Heo7b543a52010-12-18 16:30:05 +01002004 if (!mce_available(__this_cpu_ptr(&cpu_info)))
Hidetoshi Seto33edbf02009-06-15 17:18:45 +09002005 return;
Borislav Petkov5e099542009-10-16 12:31:32 +02002006 __mcheck_cpu_init_generic();
2007 __mcheck_cpu_init_timer();
Andi Kleen52d168e2009-02-12 13:39:29 +01002008}
2009
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010/* Reinit MCEs after user configuration changes */
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02002011static void mce_restart(void)
2012{
Hidetoshi Seto9aaef962011-06-17 04:40:36 -04002013 mce_timer_delete_all();
Andi Kleen52d168e2009-02-12 13:39:29 +01002014 on_each_cpu(mce_cpu_restart, NULL, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015}
2016
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002017/* Toggle features for corrected errors */
Hidetoshi Seto9aaef962011-06-17 04:40:36 -04002018static void mce_disable_cmci(void *data)
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002019{
Tejun Heo7b543a52010-12-18 16:30:05 +01002020 if (!mce_available(__this_cpu_ptr(&cpu_info)))
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002021 return;
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002022 cmci_clear();
2023}
2024
2025static void mce_enable_ce(void *all)
2026{
Tejun Heo7b543a52010-12-18 16:30:05 +01002027 if (!mce_available(__this_cpu_ptr(&cpu_info)))
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002028 return;
2029 cmci_reenable();
2030 cmci_recheck();
2031 if (all)
Borislav Petkov5e099542009-10-16 12:31:32 +02002032 __mcheck_cpu_init_timer();
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002033}
2034
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002035static struct bus_type mce_subsys = {
Ingo Molnare9eee032009-04-08 12:31:17 +02002036 .name = "machinecheck",
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002037 .dev_name = "machinecheck",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038};
2039
Greg Kroah-Hartmand6126ef2012-01-26 15:49:14 -08002040DEFINE_PER_CPU(struct device *, mce_device);
Ingo Molnare9eee032009-04-08 12:31:17 +02002041
2042__cpuinitdata
2043void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002045static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
Andi Kleencebe1822009-07-09 00:31:43 +02002046{
2047 return container_of(attr, struct mce_bank, attr);
2048}
Andi Kleen0d7482e32009-02-17 23:07:13 +01002049
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002050static ssize_t show_bank(struct device *s, struct device_attribute *attr,
Andi Kleen0d7482e32009-02-17 23:07:13 +01002051 char *buf)
2052{
Andi Kleencebe1822009-07-09 00:31:43 +02002053 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
Andi Kleen0d7482e32009-02-17 23:07:13 +01002054}
2055
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002056static ssize_t set_bank(struct device *s, struct device_attribute *attr,
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09002057 const char *buf, size_t size)
Andi Kleen0d7482e32009-02-17 23:07:13 +01002058{
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09002059 u64 new;
Ingo Molnare9eee032009-04-08 12:31:17 +02002060
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09002061 if (strict_strtoull(buf, 0, &new) < 0)
Andi Kleen0d7482e32009-02-17 23:07:13 +01002062 return -EINVAL;
Ingo Molnare9eee032009-04-08 12:31:17 +02002063
Andi Kleencebe1822009-07-09 00:31:43 +02002064 attr_to_bank(attr)->ctl = new;
Andi Kleen0d7482e32009-02-17 23:07:13 +01002065 mce_restart();
Ingo Molnare9eee032009-04-08 12:31:17 +02002066
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09002067 return size;
Andi Kleen0d7482e32009-02-17 23:07:13 +01002068}
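/*
 * These back the per-bank sysfs files, typically visible as
 * /sys/devices/system/machinecheck/machinecheck<cpu>/bank<n> (the usual
 * layout for this subsystem, shown here only for illustration).  Reading
 * returns the control mask in hex; writing a new mask (e.g. 0 to silence
 * a bank) is parsed with strict_strtoull() and followed by mce_restart()
 * so every online CPU reprograms its MCi_CTL.
 */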
Andi Kleena98f0dd2007-02-13 13:26:23 +01002069
Ingo Molnare9eee032009-04-08 12:31:17 +02002070static ssize_t
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002071show_trigger(struct device *s, struct device_attribute *attr, char *buf)
Andi Kleena98f0dd2007-02-13 13:26:23 +01002072{
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09002073 strcpy(buf, mce_helper);
Andi Kleena98f0dd2007-02-13 13:26:23 +01002074 strcat(buf, "\n");
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09002075 return strlen(mce_helper) + 1;
Andi Kleena98f0dd2007-02-13 13:26:23 +01002076}
2077
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002078static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
Ingo Molnare9eee032009-04-08 12:31:17 +02002079 const char *buf, size_t siz)
Andi Kleena98f0dd2007-02-13 13:26:23 +01002080{
2081 char *p;
Ingo Molnare9eee032009-04-08 12:31:17 +02002082
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09002083 strncpy(mce_helper, buf, sizeof(mce_helper));
2084 mce_helper[sizeof(mce_helper)-1] = 0;
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09002085 p = strchr(mce_helper, '\n');
Ingo Molnare9eee032009-04-08 12:31:17 +02002086
Jan Beuliche9084ec2009-07-16 09:45:11 +01002087 if (p)
Ingo Molnare9eee032009-04-08 12:31:17 +02002088 *p = 0;
2089
Jan Beuliche9084ec2009-07-16 09:45:11 +01002090 return strlen(mce_helper) + !!p;
Andi Kleena98f0dd2007-02-13 13:26:23 +01002091}
2092
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002093static ssize_t set_ignore_ce(struct device *s,
2094 struct device_attribute *attr,
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002095 const char *buf, size_t size)
2096{
2097 u64 new;
2098
2099 if (strict_strtoull(buf, 0, &new) < 0)
2100 return -EINVAL;
2101
2102 if (mce_ignore_ce ^ !!new) {
2103 if (new) {
2104 /* disable ce features */
Hidetoshi Seto9aaef962011-06-17 04:40:36 -04002105 mce_timer_delete_all();
2106 on_each_cpu(mce_disable_cmci, NULL, 1);
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002107 mce_ignore_ce = 1;
2108 } else {
2109 /* enable ce features */
2110 mce_ignore_ce = 0;
2111 on_each_cpu(mce_enable_ce, (void *)1, 1);
2112 }
2113 }
2114 return size;
2115}
2116
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002117static ssize_t set_cmci_disabled(struct device *s,
2118 struct device_attribute *attr,
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002119 const char *buf, size_t size)
2120{
2121 u64 new;
2122
2123 if (strict_strtoull(buf, 0, &new) < 0)
2124 return -EINVAL;
2125
2126 if (mce_cmci_disabled ^ !!new) {
2127 if (new) {
2128 /* disable cmci */
Hidetoshi Seto9aaef962011-06-17 04:40:36 -04002129 on_each_cpu(mce_disable_cmci, NULL, 1);
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002130 mce_cmci_disabled = 1;
2131 } else {
2132 /* enable cmci */
2133 mce_cmci_disabled = 0;
2134 on_each_cpu(mce_enable_ce, NULL, 1);
2135 }
2136 }
2137 return size;
2138}
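/*
 * Both handlers above only act on a real state change (the "old ^ !!new"
 * test) and push the new setting to every online CPU with on_each_cpu(),
 * disabling or re-enabling CMCI and, for ignore_ce, the poll timer too.
 */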
2139
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002140static ssize_t store_int_with_restart(struct device *s,
2141 struct device_attribute *attr,
Andi Kleenb56f6422009-05-27 21:56:52 +02002142 const char *buf, size_t size)
2143{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002144 ssize_t ret = device_store_int(s, attr, buf, size);
Andi Kleenb56f6422009-05-27 21:56:52 +02002145 mce_restart();
2146 return ret;
2147}
2148
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002149static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
2150static DEVICE_INT_ATTR(tolerant, 0644, tolerant);
2151static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
2152static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
Ingo Molnare9eee032009-04-08 12:31:17 +02002153
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002154static struct dev_ext_attribute dev_attr_check_interval = {
2155 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
Andi Kleenb56f6422009-05-27 21:56:52 +02002156 &check_interval
2157};
Ingo Molnare9eee032009-04-08 12:31:17 +02002158
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002159static struct dev_ext_attribute dev_attr_ignore_ce = {
2160 __ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce),
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002161 &mce_ignore_ce
2162};
2163
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002164static struct dev_ext_attribute dev_attr_cmci_disabled = {
2165 __ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled),
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09002166 &mce_cmci_disabled
2167};
2168
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002169static struct device_attribute *mce_device_attrs[] = {
2170 &dev_attr_tolerant.attr,
2171 &dev_attr_check_interval.attr,
2172 &dev_attr_trigger,
2173 &dev_attr_monarch_timeout.attr,
2174 &dev_attr_dont_log_ce.attr,
2175 &dev_attr_ignore_ce.attr,
2176 &dev_attr_cmci_disabled.attr,
Andi Kleena98f0dd2007-02-13 13:26:23 +01002177 NULL
2178};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002180static cpumask_var_t mce_device_initialized;
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08002181
Greg Kroah-Hartmane032d8072012-01-16 14:40:28 -08002182static void mce_device_release(struct device *dev)
2183{
2184 kfree(dev);
2185}
2186
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002187/* Per cpu device init. All of the cpus still share the same ctrl bank: */
2188static __cpuinit int mce_device_create(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189{
Greg Kroah-Hartmane032d8072012-01-16 14:40:28 -08002190 struct device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 int err;
Hidetoshi Setob1f49f92009-06-18 14:53:24 +09002192 int i, j;
Mike Travis92cb7612007-10-19 20:35:04 +02002193
Andreas Herrmann90367552007-11-07 02:12:58 +01002194 if (!mce_available(&boot_cpu_data))
Andi Kleen91c6d402005-07-28 21:15:39 -07002195 return -EIO;
2196
Greg Kroah-Hartmane032d8072012-01-16 14:40:28 -08002197 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2198 if (!dev)
2199 return -ENOMEM;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002200 dev->id = cpu;
2201 dev->bus = &mce_subsys;
Greg Kroah-Hartmane032d8072012-01-16 14:40:28 -08002202 dev->release = &mce_device_release;
Andi Kleen91c6d402005-07-28 21:15:39 -07002203
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002204 err = device_register(dev);
Akinobu Mitad435d862007-10-18 03:05:15 -07002205 if (err)
2206 return err;
Andi Kleen91c6d402005-07-28 21:15:39 -07002207
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002208 for (i = 0; mce_device_attrs[i]; i++) {
2209 err = device_create_file(dev, mce_device_attrs[i]);
Akinobu Mitad435d862007-10-18 03:05:15 -07002210 if (err)
2211 goto error;
Andi Kleen91c6d402005-07-28 21:15:39 -07002212 }
Hidetoshi Setob1f49f92009-06-18 14:53:24 +09002213 for (j = 0; j < banks; j++) {
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002214 err = device_create_file(dev, &mce_banks[j].attr);
Andi Kleen0d7482e32009-02-17 23:07:13 +01002215 if (err)
2216 goto error2;
2217 }
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002218 cpumask_set_cpu(cpu, mce_device_initialized);
Greg Kroah-Hartmand6126ef2012-01-26 15:49:14 -08002219 per_cpu(mce_device, cpu) = dev;
Akinobu Mitad435d862007-10-18 03:05:15 -07002220
2221 return 0;
Andi Kleen0d7482e32009-02-17 23:07:13 +01002222error2:
Hidetoshi Setob1f49f92009-06-18 14:53:24 +09002223 while (--j >= 0)
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002224 device_remove_file(dev, &mce_banks[j].attr);
Akinobu Mitad435d862007-10-18 03:05:15 -07002225error:
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002226 while (--i >= 0)
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002227 device_remove_file(dev, mce_device_attrs[i]);
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002228
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002229 device_unregister(dev);
Akinobu Mitad435d862007-10-18 03:05:15 -07002230
Andi Kleen91c6d402005-07-28 21:15:39 -07002231 return err;
2232}
2233
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002234static __cpuinit void mce_device_remove(unsigned int cpu)
Andi Kleen91c6d402005-07-28 21:15:39 -07002235{
Greg Kroah-Hartmand6126ef2012-01-26 15:49:14 -08002236 struct device *dev = per_cpu(mce_device, cpu);
Shaohua Li73ca5352006-01-11 22:43:06 +01002237 int i;
2238
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002239 if (!cpumask_test_cpu(cpu, mce_device_initialized))
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08002240 return;
2241
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002242 for (i = 0; mce_device_attrs[i]; i++)
2243 device_remove_file(dev, mce_device_attrs[i]);
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002244
Andi Kleen0d7482e32009-02-17 23:07:13 +01002245 for (i = 0; i < banks; i++)
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002246 device_remove_file(dev, &mce_banks[i].attr);
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002247
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002248 device_unregister(dev);
2249 cpumask_clear_cpu(cpu, mce_device_initialized);
Greg Kroah-Hartmand6126ef2012-01-26 15:49:14 -08002250 per_cpu(mce_device, cpu) = NULL;
Andi Kleen91c6d402005-07-28 21:15:39 -07002251}
Andi Kleen91c6d402005-07-28 21:15:39 -07002252
Andi Kleend6b75582009-02-12 13:39:31 +01002253/* Make sure there are no machine checks on offlined CPUs. */
Hidetoshi Seto767df1b2009-11-26 17:29:02 +09002254static void __cpuinit mce_disable_cpu(void *h)
Andi Kleend6b75582009-02-12 13:39:31 +01002255{
Andi Kleen88ccbed2009-02-12 13:49:36 +01002256 unsigned long action = *(unsigned long *)h;
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002257 int i;
Andi Kleend6b75582009-02-12 13:39:31 +01002258
Tejun Heo7b543a52010-12-18 16:30:05 +01002259 if (!mce_available(__this_cpu_ptr(&cpu_info)))
Andi Kleend6b75582009-02-12 13:39:31 +01002260 return;
Hidetoshi Seto767df1b2009-11-26 17:29:02 +09002261
Andi Kleen88ccbed2009-02-12 13:49:36 +01002262 if (!(action & CPU_TASKS_FROZEN))
2263 cmci_clear();
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002264 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02002265 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02002266
Andi Kleencebe1822009-07-09 00:31:43 +02002267 if (b->init)
Andi Kleena2d32bc2009-07-09 00:31:44 +02002268 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002269 }
Andi Kleend6b75582009-02-12 13:39:31 +01002270}
2271
Hidetoshi Seto767df1b2009-11-26 17:29:02 +09002272static void __cpuinit mce_reenable_cpu(void *h)
Andi Kleend6b75582009-02-12 13:39:31 +01002273{
Andi Kleen88ccbed2009-02-12 13:49:36 +01002274 unsigned long action = *(unsigned long *)h;
Ingo Molnare9eee032009-04-08 12:31:17 +02002275 int i;
Andi Kleend6b75582009-02-12 13:39:31 +01002276
Tejun Heo7b543a52010-12-18 16:30:05 +01002277 if (!mce_available(__this_cpu_ptr(&cpu_info)))
Andi Kleend6b75582009-02-12 13:39:31 +01002278 return;
Ingo Molnare9eee032009-04-08 12:31:17 +02002279
Andi Kleen88ccbed2009-02-12 13:49:36 +01002280 if (!(action & CPU_TASKS_FROZEN))
2281 cmci_reenable();
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002282 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02002283 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02002284
Andi Kleencebe1822009-07-09 00:31:43 +02002285 if (b->init)
Andi Kleena2d32bc2009-07-09 00:31:44 +02002286 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002287 }
Andi Kleend6b75582009-02-12 13:39:31 +01002288}
2289
Andi Kleen91c6d402005-07-28 21:15:39 -07002290/* Get notified when a cpu comes on/off. Be hotplug friendly. */
Ingo Molnare9eee032009-04-08 12:31:17 +02002291static int __cpuinit
2292mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
Andi Kleen91c6d402005-07-28 21:15:39 -07002293{
2294 unsigned int cpu = (unsigned long)hcpu;
Andi Kleen52d168e2009-02-12 13:39:29 +01002295 struct timer_list *t = &per_cpu(mce_timer, cpu);
Andi Kleen91c6d402005-07-28 21:15:39 -07002296
2297 switch (action) {
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08002298 case CPU_ONLINE:
2299 case CPU_ONLINE_FROZEN:
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002300 mce_device_create(cpu);
Rafael J. Wysocki87357282008-08-22 22:23:09 +02002301 if (threshold_cpu_callback)
2302 threshold_cpu_callback(action, cpu);
Andi Kleen91c6d402005-07-28 21:15:39 -07002303 break;
Andi Kleen91c6d402005-07-28 21:15:39 -07002304 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07002305 case CPU_DEAD_FROZEN:
Rafael J. Wysocki87357282008-08-22 22:23:09 +02002306 if (threshold_cpu_callback)
2307 threshold_cpu_callback(action, cpu);
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002308 mce_device_remove(cpu);
Andi Kleen91c6d402005-07-28 21:15:39 -07002309 break;
Andi Kleen52d168e2009-02-12 13:39:29 +01002310 case CPU_DOWN_PREPARE:
2311 case CPU_DOWN_PREPARE_FROZEN:
2312 del_timer_sync(t);
Andi Kleen88ccbed2009-02-12 13:49:36 +01002313 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
Andi Kleen52d168e2009-02-12 13:39:29 +01002314 break;
2315 case CPU_DOWN_FAILED:
2316 case CPU_DOWN_FAILED_FROZEN:
Hidetoshi Setofe5ed912009-12-03 11:33:08 +09002317 if (!mce_ignore_ce && check_interval) {
2318 t->expires = round_jiffies(jiffies +
Thomas Gleixner82f7af02012-05-24 17:54:51 +00002319 per_cpu(mce_next_interval, cpu));
Hidetoshi Setofe5ed912009-12-03 11:33:08 +09002320 add_timer_on(t, cpu);
2321 }
Andi Kleen88ccbed2009-02-12 13:49:36 +01002322 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2323 break;
2324 case CPU_POST_DEAD:
2325 /* intentionally ignoring frozen here */
2326 cmci_rediscover(cpu);
Andi Kleen52d168e2009-02-12 13:39:29 +01002327 break;
Andi Kleen91c6d402005-07-28 21:15:39 -07002328 }
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08002329 return NOTIFY_OK;
Andi Kleen91c6d402005-07-28 21:15:39 -07002330}
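/*
 * Hotplug summary: ONLINE creates the per-CPU sysfs device (and lets the
 * threshold callback add its files), DEAD tears it down, DOWN_PREPARE
 * stops the poll timer and releases this CPU's CMCI banks, DOWN_FAILED
 * undoes that (restarting the timer unless CEs are ignored or polling is
 * off), and POST_DEAD lets a surviving CPU take over the dead CPU's CMCI
 * banks via cmci_rediscover().
 */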
2331
Sam Ravnborg1e356692008-01-30 13:33:36 +01002332static struct notifier_block mce_cpu_notifier __cpuinitdata = {
Andi Kleen91c6d402005-07-28 21:15:39 -07002333 .notifier_call = mce_cpu_callback,
2334};
2335
Andi Kleencebe1822009-07-09 00:31:43 +02002336static __init void mce_init_banks(void)
Andi Kleen0d7482e32009-02-17 23:07:13 +01002337{
2338 int i;
2339
Andi Kleen0d7482e32009-02-17 23:07:13 +01002340 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02002341 struct mce_bank *b = &mce_banks[i];
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002342 struct device_attribute *a = &b->attr;
Ingo Molnare9eee032009-04-08 12:31:17 +02002343
Eric W. Biedermana07e4152010-02-11 15:23:05 -08002344 sysfs_attr_init(&a->attr);
Andi Kleencebe1822009-07-09 00:31:43 +02002345 a->attr.name = b->attrname;
2346 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
Ingo Molnare9eee032009-04-08 12:31:17 +02002347
2348 a->attr.mode = 0644;
2349 a->show = show_bank;
2350 a->store = set_bank;
Andi Kleen0d7482e32009-02-17 23:07:13 +01002351 }
Andi Kleen0d7482e32009-02-17 23:07:13 +01002352}
2353
Borislav Petkov5e099542009-10-16 12:31:32 +02002354static __init int mcheck_init_device(void)
Andi Kleen91c6d402005-07-28 21:15:39 -07002355{
2356 int err;
2357 int i = 0;
2358
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 if (!mce_available(&boot_cpu_data))
2360 return -EIO;
Andi Kleen0d7482e32009-02-17 23:07:13 +01002361
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002362 zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
Rusty Russell996867d2009-03-13 14:49:51 +10302363
Andi Kleencebe1822009-07-09 00:31:43 +02002364 mce_init_banks();
Andi Kleen0d7482e32009-02-17 23:07:13 +01002365
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002366 err = subsys_system_register(&mce_subsys, NULL);
Akinobu Mitad435d862007-10-18 03:05:15 -07002367 if (err)
2368 return err;
Andi Kleen91c6d402005-07-28 21:15:39 -07002369
2370 for_each_online_cpu(i) {
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002371 err = mce_device_create(i);
Akinobu Mitad435d862007-10-18 03:05:15 -07002372 if (err)
2373 return err;
Andi Kleen91c6d402005-07-28 21:15:39 -07002374 }
2375
Rafael J. Wysockif3c6ea12011-03-23 22:15:54 +01002376 register_syscore_ops(&mce_syscore_ops);
Chandra Seetharamanbe6b5a32006-07-30 03:03:37 -07002377 register_hotcpu_notifier(&mce_cpu_notifier);
Hidetoshi Seto93b62c32011-06-08 11:00:45 +09002378
2379 /* register character device /dev/mcelog */
2380 misc_register(&mce_chrdev_device);
Ingo Molnare9eee032009-04-08 12:31:17 +02002381
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383}
Borislav Petkov5e099542009-10-16 12:31:32 +02002384device_initcall(mcheck_init_device);
Ingo Molnara988d332009-04-08 12:31:25 +02002385
Andi Kleend7c3c9a2009-04-28 23:07:25 +02002386/*
2387 * Old style boot options parsing. Only for compatibility.
2388 */
2389static int __init mcheck_disable(char *str)
2390{
2391 mce_disabled = 1;
2392 return 1;
2393}
2394__setup("nomce", mcheck_disable);
Huang Ying5be9ed22009-07-31 09:41:42 +08002395
2396#ifdef CONFIG_DEBUG_FS
2397struct dentry *mce_get_debugfs_dir(void)
2398{
2399 static struct dentry *dmce;
2400
2401 if (!dmce)
2402 dmce = debugfs_create_dir("mce", NULL);
2403
2404 return dmce;
2405}
Huang Yingbf783f92009-07-31 09:41:43 +08002406
2407static void mce_reset(void)
2408{
2409 cpu_missing = 0;
2410 atomic_set(&mce_fake_paniced, 0);
2411 atomic_set(&mce_executing, 0);
2412 atomic_set(&mce_callin, 0);
2413 atomic_set(&global_nwo, 0);
2414}
2415
2416static int fake_panic_get(void *data, u64 *val)
2417{
2418 *val = fake_panic;
2419 return 0;
2420}
2421
2422static int fake_panic_set(void *data, u64 val)
2423{
2424 mce_reset();
2425 fake_panic = val;
2426 return 0;
2427}
2428
2429DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2430 fake_panic_set, "%llu\n");
2431
Borislav Petkov5e099542009-10-16 12:31:32 +02002432static int __init mcheck_debugfs_init(void)
Huang Yingbf783f92009-07-31 09:41:43 +08002433{
2434 struct dentry *dmce, *ffake_panic;
2435
2436 dmce = mce_get_debugfs_dir();
2437 if (!dmce)
2438 return -ENOMEM;
2439 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2440 &fake_panic_fops);
2441 if (!ffake_panic)
2442 return -ENOMEM;
2443
2444 return 0;
2445}
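/*
 * With CONFIG_DEBUG_FS this exposes <debugfs>/mce/fake_panic: writing a
 * non-zero value (after mce_reset() clears the rendezvous counters) is
 * meant for exercising the machine check code, making the panic path
 * only report instead of actually taking the box down; reading returns
 * the current setting.
 */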
Borislav Petkov5e099542009-10-16 12:31:32 +02002446late_initcall(mcheck_debugfs_init);
Huang Ying5be9ed22009-07-31 09:41:42 +08002447#endif