/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>

#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/ipi.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_read_mutex);

#define rcu_dereference_check_mce(p) \
	rcu_dereference_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR	227

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int			tolerant		__read_mostly = 1;
static int			banks			__read_mostly;
static int			rip_msr			__read_mostly;
static int			mce_bootlog		__read_mostly = -1;
static int			monarch_timeout		__read_mostly = -1;
static int			mce_panic_timeout	__read_mostly;
static int			mce_dont_log_ce		__read_mostly;
int				mce_cmci_disabled	__read_mostly;
int				mce_ignore_ce		__read_mostly;
int				mce_ser			__read_mostly;

struct mce_bank			*mce_banks		__read_mostly;

/* User mode helper program triggered by machine check event */
static unsigned long		mce_need_notify;
static char			mce_helper[128];
static char			*mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int			cpu_missing;

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);

static int default_decode_mce(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	pr_emerg("No human readable MCE decoding support on this CPU type.\n");
	pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");

	return NOTIFY_STOP;
}

static struct notifier_block mce_dec_nb = {
	.notifier_call = default_decode_mce,
	.priority      = -1,
};
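
/*
 * default_decode_mce() is the fallback decoder: with priority -1 it runs
 * after any EDAC/chipset decoder registered on x86_mce_decoder_chain at the
 * default priority, so the generic "run mcelog --ascii" hint is only printed
 * when no specialized decoder stopped the chain first. (Registration of
 * mce_dec_nb itself is assumed to happen in init code outside this excerpt.)
 */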

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
#ifdef CONFIG_SMP
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}
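
/*
 * The loop above reserves a slot locklessly: it scans mcelog.entry[] for a
 * free record, claims it by advancing mcelog.next with cmpxchg() (retrying
 * on contention), and only sets ->finished once the record has been copied,
 * with wmb() ordering the stores so readers never see a half-written entry.
 */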

static void print_mce(struct mce *m)
{
	pr_emerg("CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg("RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
				m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg("TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	pr_emerg("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
}

static void print_mce_head(void)
{
	pr_emerg("\nHARDWARE ERROR\n");
}

static void print_mce_tail(void)
{
	pr_emerg("This is not a software problem!\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mce_panic_timeout;
	panic("Panicing machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	print_mce_head();
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
	print_mce_tail();
	if (exp)
		printk(KERN_EMERG "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mce_panic_timeout;
		panic(msg);
	} else
		printk(KERN_EMERG "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __get_cpu_var(injectm.bank);

	if (msr == rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__get_cpu_var(injectm).finished) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__get_cpu_var(injectm).finished) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}
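
/*
 * All MSR accesses in the handlers below go through mce_rdmsrl()/mce_wrmsrl()
 * so that, once the per-CPU injectm template is marked finished, reads and
 * writes are redirected to the injected struct mce instead of real hardware.
 * This is what the software error injection support (e.g. mce-inject) relies
 * on.
 */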

/*
 * Simple lockless ring to communicate PFNs from the exception handler to the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}
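
/*
 * Typical flow: do_machine_check() queues the pfn of an action-optional
 * error with mce_ring_add() in MCE context, and mce_notify_process() later
 * drains the ring with mce_ring_get() in process context and hands each pfn
 * to memory_failure().
 */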

int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty()) {
		struct work_struct *work = &__get_cpu_var(mce_work);
		if (!work_pending(work))
			schedule_work(work);
	}
}

/*
 * Get the address of the instruction at the time of the machine check
 * error.
 */
static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	} else {
		m->ip = 0;
		m->cs = 0;
	}
	if (rip_msr)
		m->ip = mce_rdmsrl(rip_msr);
}
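
/*
 * rip_msr is nonzero only when __mcheck_cpu_cap_init() found the extended
 * MCG register set (MCG_EXT_P with at least 9 extended registers); in that
 * case the more accurate RIP is read from MSR_IA32_MCG_EIP instead of the
 * trap frame.
 */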

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Called after interrupts have been reenabled again
 * when an MCE happened during an interrupts-off region
 * in the kernel.
 */
asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	exit_idle();
	irq_enter();
	mce_notify_irq();
	mce_schedule_work();
	irq_exit();
}
#endif

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Without APIC do not notify. The event will be picked
	 * up eventually.
	 */
	if (!cpu_has_apic)
		return;

	/*
	 * When interrupts are disabled we cannot use
	 * kernel services safely. Trigger a self interrupt
	 * through the APIC to instead do the notification
	 * after interrupts are reenabled again.
	 */
	apic->send_IPI_self(MCE_SELF_VECTOR);

	/*
	 * Wait for idle afterwards again so that we don't leave the
	 * APIC in a non idle state because the normal APIC writes
	 * cannot exclude us.
	 */
	apic_wait_icr_idle();
#endif
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	__get_cpu_var(mce_poll_count)++;

	mce_setup(&m);

	m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	for (i = 0; i < banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
			mce_log(&m);
			add_taint(TAINT_MACHINE_CHECK);
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);
/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
	int i;

	for (i = 0; i < banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
			return 1;
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure all CPUs' errors are examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU that detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires a panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
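
/*
 * How the rendezvous is used by do_machine_check(): every CPU calls
 * mce_start(&no_way_out), which publishes its local no_way_out vote, waits
 * for all online CPUs to call in and returns this CPU's callin order. The
 * CPUs then scan their banks one after another in that order, and mce_end()
 * lets the Monarch (order == 1) run mce_reign() over all mces_seen before
 * the global synchronization state is reset.
 */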

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if ((m->misc & 0x3f) > PAGE_SHIFT)
		return 0;
	if (((m->misc >> 6) & 7) != MCM_ADDR_PHYS)
		return 0;
	return 1;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so always be careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	__get_cpu_var(mce_exception_count)++;

	if (notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
		goto out;
	if (!banks)
		goto out;

	mce_setup(&m);

	m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	final = &__get_cpu_var(mces_seen);
	*final = m;

	no_way_out = mce_no_way_out(&m, &msg);

	barrier();

	/*
	 * When there is no restart IP we must always kill or panic.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non-uncorrected or non-signalled errors are handled by
		 * machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		severity = mce_severity(&m, tolerant, NULL);

		/*
		 * When the machine check is for the corrected handler,
		 * don't touch it, unless we're panicking.
		 */
		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		/*
		 * Kill on action required.
		 */
		if (severity == MCE_AR_SEVERITY)
			kill_it = 1;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * usable_address or mce_add_ring fails.
		 * RED-PEN don't ignore overflow for tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_get_rip(&m, regs);
		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 *
	 * This is mainly used in the case when the system doesn't
	 * support MCE broadcasting or it has been disabled.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Fatal machine check on current CPU", final, msg);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done.  Try to kill as little as possible.  If we can kill just
	 * one task, do that.  If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */

	if (kill_it && tolerant < 3)
		force_sig(SIGBUS, current);

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);

/* dummy to break dependency. actual code is in mm/memory-failure.c */
void __attribute__((weak)) memory_failure(unsigned long pfn, int vector)
{
	printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn);
}

/*
 * Called after mce notification in process context. This code
 * is allowed to sleep. Call the high level VM handler to process
 * any corrupted pages.
 * Assume that the work queue code only calls this one at a time
 * per CPU.
 * Note we don't disable preemption, so this code might run on the wrong
 * CPU. In this case the event is picked up by the scheduled work queue.
 * This is merely a fast path to expedite processing in some common
 * cases.
 */
void mce_notify_process(void)
{
	unsigned long pfn;
	mce_notify_irq();
	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR);
}

static void mce_process_work(struct work_struct *dummy)
{
	mce_notify_process();
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Log the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) MSR.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mce_start_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);
	int *n;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(&current_cpu_data)) {
		machine_check_poll(MCP_TIMESTAMP,
				&__get_cpu_var(mce_poll_banks));
	}

	/*
	 * Alert userspace if needed.  If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	n = &__get_cpu_var(mce_next_interval);
	if (mce_notify_irq())
		*n = max(*n/2, HZ/100);
	else
		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));

	t->expires = jiffies + *n;
	add_timer_on(t, smp_processor_id());
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		wake_up_interruptible(&mce_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (mce_helper[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			printk(KERN_INFO "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;

	mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!banks)
		printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		printk(KERN_WARNING
		       "MCE: Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);
	banks = b;
	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mce_ser = 1;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	mce_banks_t all_banks;
	u64 cap;
	int i;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			mce_bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && banks > 0)
			mce_banks[0].ctl = 0;
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */

		if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			monarch_timeout < 0)
			monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
			mce_bootlog = 0;
	}
	if (monarch_timeout < 0)
		monarch_timeout = 0;
	if (mce_bootlog != 0)
		mce_panic_timeout = 30;

	return 0;
}

static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return;
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		break;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		break;
	}
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	int *n = &__get_cpu_var(mce_next_interval);

	setup_timer(t, mce_start_timer, smp_processor_id());

	if (mce_ignore_ce)
		return;

	*n = check_interval * HZ;
	if (!*n)
		return;
	t->expires = round_jiffies(jiffies + *n);
	add_timer_on(t, smp_processor_id());
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
Andi Kleen4efc0672009-04-28 19:07:31 +02001437 if (mce_disabled)
1438 return;
1439
Borislav Petkov5e099542009-10-16 12:31:32 +02001440 __mcheck_cpu_ancient_init(c);
Andi Kleen4efc0672009-04-28 19:07:31 +02001441
Andi Kleen5b4408f2009-02-12 13:39:30 +01001442 if (!mce_available(c))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 return;
1444
Borislav Petkov5e099542009-10-16 12:31:32 +02001445 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
Andi Kleen04b2b1a2009-04-28 22:50:19 +02001446 mce_disabled = 1;
Andi Kleen0d7482e32009-02-17 23:07:13 +01001447 return;
1448 }
Andi Kleen0d7482e32009-02-17 23:07:13 +01001449
Andi Kleen5d727922009-04-27 19:25:48 +02001450 machine_check_vector = do_machine_check;
1451
Borislav Petkov5e099542009-10-16 12:31:32 +02001452 __mcheck_cpu_init_generic();
1453 __mcheck_cpu_init_vendor(c);
1454 __mcheck_cpu_init_timer();
Andi Kleen9b1beaf2009-05-27 21:56:59 +02001455 INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
Borislav Petkovfb253192009-10-07 13:20:38 +02001456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457}
1458
1459/*
1460 * Character device to read and clear the MCE log.
1461 */
1462
Tim Hockinf528e7b2007-07-21 17:10:35 +02001463static DEFINE_SPINLOCK(mce_state_lock);
Ingo Molnare9eee032009-04-08 12:31:17 +02001464static int open_count; /* #times opened */
1465static int open_exclu; /* already open exclusive? */
Tim Hockinf528e7b2007-07-21 17:10:35 +02001466
1467static int mce_open(struct inode *inode, struct file *file)
1468{
1469 spin_lock(&mce_state_lock);
1470
1471 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
1472 spin_unlock(&mce_state_lock);
Ingo Molnare9eee032009-04-08 12:31:17 +02001473
Tim Hockinf528e7b2007-07-21 17:10:35 +02001474 return -EBUSY;
1475 }
1476
1477 if (file->f_flags & O_EXCL)
1478 open_exclu = 1;
1479 open_count++;
1480
1481 spin_unlock(&mce_state_lock);
1482
Tim Hockinbd784322007-07-21 17:10:37 +02001483 return nonseekable_open(inode, file);
Tim Hockinf528e7b2007-07-21 17:10:35 +02001484}
1485
1486static int mce_release(struct inode *inode, struct file *file)
1487{
1488 spin_lock(&mce_state_lock);
1489
1490 open_count--;
1491 open_exclu = 0;
1492
1493 spin_unlock(&mce_state_lock);
1494
1495 return 0;
1496}
1497
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001498static void collect_tscs(void *data)
1499{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 unsigned long *cpu_tsc = (unsigned long *)data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001502 rdtscll(cpu_tsc[smp_processor_id()]);
1503}
1504
Huang Ying482908b2010-05-18 14:35:22 +08001505static int mce_apei_read_done;
1506
1507/* Collect MCE records of the previous boot from persistent storage via APEI ERST. */
1508static int __mce_read_apei(char __user **ubuf, size_t usize)
1509{
1510 int rc;
1511 u64 record_id;
1512 struct mce m;
1513
1514 if (usize < sizeof(struct mce))
1515 return -EINVAL;
1516
1517 rc = apei_read_mce(&m, &record_id);
 1518 /* Error or no more MCE records */
1519 if (rc <= 0) {
1520 mce_apei_read_done = 1;
1521 return rc;
1522 }
1523 rc = -EFAULT;
1524 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1525 return rc;
1526 /*
 1527 * Ideally we would clear the record only after it has
 1528 * been flushed to disk or sent over the network by
 1529 * /sbin/mcelog, but we have no interface for that yet,
 1530 * so just clear it here to avoid duplication.
1531 */
1532 rc = apei_clear_mce(record_id);
1533 if (rc) {
1534 mce_apei_read_done = 1;
1535 return rc;
1536 }
1537 *ubuf += sizeof(struct mce);
1538
1539 return 0;
1540}
1541
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001542static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
1543 loff_t *off)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 char __user *buf = ubuf;
Ingo Molnare9eee032009-04-08 12:31:17 +02001546 unsigned long *cpu_tsc;
1547 unsigned prev, next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 int i, err;
1549
Mike Travis6bca67f2008-07-18 18:11:27 -07001550 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
Andi Kleenf0de53b2005-04-16 15:25:10 -07001551 if (!cpu_tsc)
1552 return -ENOMEM;
1553
Daniel Walker8c8b8852008-01-30 13:31:17 +01001554 mutex_lock(&mce_read_mutex);
Huang Ying482908b2010-05-18 14:35:22 +08001555
1556 if (!mce_apei_read_done) {
1557 err = __mce_read_apei(&buf, usize);
1558 if (err || buf != ubuf)
1559 goto out;
1560 }
1561
Paul E. McKenneyf56e8a02010-03-05 15:03:27 -08001562 next = rcu_dereference_check_mce(mcelog.next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
1564 /* Only supports full reads right now */
Huang Ying482908b2010-05-18 14:35:22 +08001565 err = -EINVAL;
1566 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1567 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
1569 err = 0;
Huang Yingef41df4342009-02-12 13:39:34 +01001570 prev = 0;
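	/*
	 * Drain the log: copy entries [prev, next) to userspace, waiting
	 * briefly for any entry whose 'finished' flag is not yet set
	 * (entries that never finish are discarded), then clear the
	 * consumed slots.  New records may be logged concurrently, so
	 * retry until mcelog.next can be atomically reset to 0.
	 */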
1571 do {
1572 for (i = prev; i < next; i++) {
1573 unsigned long start = jiffies;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001574
Huang Yingef41df4342009-02-12 13:39:34 +01001575 while (!mcelog.entry[i].finished) {
1576 if (time_after_eq(jiffies, start + 2)) {
1577 memset(mcelog.entry + i, 0,
1578 sizeof(struct mce));
1579 goto timeout;
1580 }
1581 cpu_relax();
Andi Kleen673242c2005-09-12 18:49:24 +02001582 }
Huang Yingef41df4342009-02-12 13:39:34 +01001583 smp_rmb();
1584 err |= copy_to_user(buf, mcelog.entry + i,
1585 sizeof(struct mce));
1586 buf += sizeof(struct mce);
1587timeout:
1588 ;
Andi Kleen673242c2005-09-12 18:49:24 +02001589 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
Huang Yingef41df4342009-02-12 13:39:34 +01001591 memset(mcelog.entry + prev, 0,
1592 (next - prev) * sizeof(struct mce));
1593 prev = next;
1594 next = cmpxchg(&mcelog.next, prev, 0);
1595 } while (next != prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596
Paul E. McKenneyb2b18662005-06-25 14:55:38 -07001597 synchronize_sched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001599 /*
1600 * Collect entries that were still getting written before the
1601 * synchronize.
1602 */
Jens Axboe15c8b6c2008-05-09 09:39:44 +02001603 on_each_cpu(collect_tscs, cpu_tsc, 1);
Ingo Molnare9eee032009-04-08 12:31:17 +02001604
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001605 for (i = next; i < MCE_LOG_LEN; i++) {
1606 if (mcelog.entry[i].finished &&
1607 mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
1608 err |= copy_to_user(buf, mcelog.entry+i,
1609 sizeof(struct mce));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 smp_rmb();
1611 buf += sizeof(struct mce);
1612 memset(&mcelog.entry[i], 0, sizeof(struct mce));
1613 }
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001614 }
Huang Ying482908b2010-05-18 14:35:22 +08001615
1616 if (err)
1617 err = -EFAULT;
1618
1619out:
Daniel Walker8c8b8852008-01-30 13:31:17 +01001620 mutex_unlock(&mce_read_mutex);
Andi Kleenf0de53b2005-04-16 15:25:10 -07001621 kfree(cpu_tsc);
Ingo Molnare9eee032009-04-08 12:31:17 +02001622
Huang Ying482908b2010-05-18 14:35:22 +08001623 return err ? err : buf - ubuf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624}
1625
Tim Hockine02e68d2007-07-21 17:10:36 +02001626static unsigned int mce_poll(struct file *file, poll_table *wait)
1627{
1628 poll_wait(file, &mce_wait, wait);
Paul E. McKenneyf56e8a02010-03-05 15:03:27 -08001629 if (rcu_dereference_check_mce(mcelog.next))
Tim Hockine02e68d2007-07-21 17:10:36 +02001630 return POLLIN | POLLRDNORM;
Huang Ying482908b2010-05-18 14:35:22 +08001631 if (!mce_apei_read_done && apei_check_mce())
1632 return POLLIN | POLLRDNORM;
Tim Hockine02e68d2007-07-21 17:10:36 +02001633 return 0;
1634}
1635
Nikanth Karthikesanc68461b2008-01-30 13:32:59 +01001636static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637{
1638 int __user *p = (int __user *)arg;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001639
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 if (!capable(CAP_SYS_ADMIN))
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001641 return -EPERM;
Ingo Molnare9eee032009-04-08 12:31:17 +02001642
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 switch (cmd) {
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001644 case MCE_GET_RECORD_LEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 return put_user(sizeof(struct mce), p);
1646 case MCE_GET_LOG_LEN:
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001647 return put_user(MCE_LOG_LEN, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 case MCE_GETCLEAR_FLAGS: {
1649 unsigned flags;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001650
1651 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 flags = mcelog.flags;
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001653 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
Ingo Molnare9eee032009-04-08 12:31:17 +02001654
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001655 return put_user(flags, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 }
1657 default:
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001658 return -ENOTTY;
1659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
1661
H. Peter Anvina1ff41b2009-05-25 22:16:14 -07001662/* Modified in mce-inject.c, so not static or const */
Andi Kleenea149b32009-04-29 19:31:00 +02001663struct file_operations mce_chrdev_ops = {
Ingo Molnare9eee032009-04-08 12:31:17 +02001664 .open = mce_open,
1665 .release = mce_release,
1666 .read = mce_read,
1667 .poll = mce_poll,
1668 .unlocked_ioctl = mce_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669};
Andi Kleenea149b32009-04-29 19:31:00 +02001670EXPORT_SYMBOL_GPL(mce_chrdev_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
1672static struct miscdevice mce_log_device = {
1673 MISC_MCELOG_MINOR,
1674 "mcelog",
1675 &mce_chrdev_ops,
1676};
1677
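/*
 * Illustrative only (never compiled): a minimal userspace sketch of how a
 * consumer such as mcelog might drain this device, assuming struct mce,
 * MCE_LOG_LEN and the MCE_* ioctls handled above are visible to userspace
 * via <asm/mce.h>.  The driver only accepts full reads of the whole log,
 * so the buffer must hold MCE_LOG_LEN records and the read is done in one
 * call.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <asm/mce.h>

static int drain_mcelog(void)
{
	struct mce records[MCE_LOG_LEN];
	int recordlen = 0, fd, n;

	fd = open("/dev/mcelog", O_RDONLY);
	if (fd < 0)
		return -1;
	/* Ask the kernel for its record size rather than assuming it. */
	if (ioctl(fd, MCE_GET_RECORD_LEN, &recordlen) < 0 ||
	    recordlen != sizeof(struct mce)) {
		close(fd);
		return -1;
	}
	/* read() returns the number of bytes actually filled in. */
	n = read(fd, records, sizeof(records));
	close(fd);
	return n < 0 ? -1 : n / recordlen;
}
#endif
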
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001678/*
Hidetoshi Seto62fdac52009-06-11 16:06:07 +09001679 * mce=off Disables machine check
1680 * mce=no_cmci Disables CMCI
1681 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1682 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
Andi Kleen3c079792009-05-27 21:56:55 +02001683 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1684 * monarchtimeout is how long to wait for other CPUs on machine
1685 * check, or 0 to not wait
Hidetoshi Seto13503fa2009-03-26 17:39:20 +09001686 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
1687 * mce=nobootlog Don't log MCEs from before booting.
1688 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689static int __init mcheck_enable(char *str)
1690{
Bartlomiej Zolnierkiewicze3346fc2009-07-28 23:55:09 +02001691 if (*str == 0) {
Andi Kleen4efc0672009-04-28 19:07:31 +02001692 enable_p5_mce();
Bartlomiej Zolnierkiewicze3346fc2009-07-28 23:55:09 +02001693 return 1;
1694 }
Andi Kleen4efc0672009-04-28 19:07:31 +02001695 if (*str == '=')
1696 str++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 if (!strcmp(str, "off"))
Andi Kleen04b2b1a2009-04-28 22:50:19 +02001698 mce_disabled = 1;
Hidetoshi Seto62fdac52009-06-11 16:06:07 +09001699 else if (!strcmp(str, "no_cmci"))
1700 mce_cmci_disabled = 1;
1701 else if (!strcmp(str, "dont_log_ce"))
1702 mce_dont_log_ce = 1;
1703 else if (!strcmp(str, "ignore_ce"))
1704 mce_ignore_ce = 1;
Hidetoshi Seto13503fa2009-03-26 17:39:20 +09001705 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
1706 mce_bootlog = (str[0] == 'b');
Andi Kleen3c079792009-05-27 21:56:55 +02001707 else if (isdigit(str[0])) {
Andi Kleen8c566ef2005-09-12 18:49:24 +02001708 get_option(&str, &tolerant);
Andi Kleen3c079792009-05-27 21:56:55 +02001709 if (*str == ',') {
1710 ++str;
1711 get_option(&str, &monarch_timeout);
1712 }
1713 } else {
Andi Kleen4efc0672009-04-28 19:07:31 +02001714 printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
Hidetoshi Seto13503fa2009-03-26 17:39:20 +09001715 str);
1716 return 0;
1717 }
OGAWA Hirofumi9b410462006-03-31 02:30:33 -08001718 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719}
Andi Kleen4efc0672009-04-28 19:07:31 +02001720__setup("mce", mcheck_enable);
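
/*
 * Illustrative command lines, derived from the parsing above (the mce=
 * parameter may be repeated, one option per instance):
 *
 *	mce=off			disable machine check handling entirely
 *	mce=ignore_ce		no polling/CMCI for corrected errors
 *	mce=bootlog		log MCEs left over from before booting
 *	mce=2,500000		tolerant level 2, monarch timeout 500000
 *				(microseconds, matching the USEC_PER_SEC
 *				default used above)
 */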
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
Yong Wanga2202aa2009-11-10 09:38:24 +08001722int __init mcheck_init(void)
Borislav Petkovb33a6362009-10-16 12:31:33 +02001723{
1724 atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);
1725
Yong Wanga2202aa2009-11-10 09:38:24 +08001726 mcheck_intel_therm_init();
1727
Borislav Petkovb33a6362009-10-16 12:31:33 +02001728 return 0;
1729}
Borislav Petkovb33a6362009-10-16 12:31:33 +02001730
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001731/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 * Sysfs support
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001733 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
Andi Kleen973a2dd2009-02-12 13:39:32 +01001735/*
1736 * Disable machine checks on suspend and shutdown. We can't really handle
1737 * them later.
1738 */
Borislav Petkov5e099542009-10-16 12:31:32 +02001739static int mce_disable_error_reporting(void)
Andi Kleen973a2dd2009-02-12 13:39:32 +01001740{
1741 int i;
1742
Andi Kleen06b7a7a2009-04-27 18:37:43 +02001743 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02001744 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02001745
Andi Kleencebe1822009-07-09 00:31:43 +02001746 if (b->init)
Andi Kleena2d32bc2009-07-09 00:31:44 +02001747 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
Andi Kleen06b7a7a2009-04-27 18:37:43 +02001748 }
Andi Kleen973a2dd2009-02-12 13:39:32 +01001749 return 0;
1750}
1751
1752static int mce_suspend(struct sys_device *dev, pm_message_t state)
1753{
Borislav Petkov5e099542009-10-16 12:31:32 +02001754 return mce_disable_error_reporting();
Andi Kleen973a2dd2009-02-12 13:39:32 +01001755}
1756
1757static int mce_shutdown(struct sys_device *dev)
1758{
Borislav Petkov5e099542009-10-16 12:31:32 +02001759 return mce_disable_error_reporting();
Andi Kleen973a2dd2009-02-12 13:39:32 +01001760}
1761
Ingo Molnare9eee032009-04-08 12:31:17 +02001762/*
 1763 * On resume, clear all MCE state. We don't want to see leftovers from the BIOS.
 1764 * Only one CPU is active at this time; the others get re-added later using
1765 * CPU hotplug:
1766 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767static int mce_resume(struct sys_device *dev)
1768{
Borislav Petkov5e099542009-10-16 12:31:32 +02001769 __mcheck_cpu_init_generic();
1770 __mcheck_cpu_init_vendor(&current_cpu_data);
Ingo Molnare9eee032009-04-08 12:31:17 +02001771
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 return 0;
1773}
1774
Andi Kleen52d168e2009-02-12 13:39:29 +01001775static void mce_cpu_restart(void *data)
1776{
1777 del_timer_sync(&__get_cpu_var(mce_timer));
Hidetoshi Seto33edbf02009-06-15 17:18:45 +09001778 if (!mce_available(&current_cpu_data))
1779 return;
Borislav Petkov5e099542009-10-16 12:31:32 +02001780 __mcheck_cpu_init_generic();
1781 __mcheck_cpu_init_timer();
Andi Kleen52d168e2009-02-12 13:39:29 +01001782}
1783
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784/* Reinit MCEs after user configuration changes */
Thomas Gleixnerd88203d2007-10-23 22:37:23 +02001785static void mce_restart(void)
1786{
Andi Kleen52d168e2009-02-12 13:39:29 +01001787 on_each_cpu(mce_cpu_restart, NULL, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788}
1789
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001790/* Toggle features for corrected errors */
1791static void mce_disable_ce(void *all)
1792{
1793 if (!mce_available(&current_cpu_data))
1794 return;
1795 if (all)
1796 del_timer_sync(&__get_cpu_var(mce_timer));
1797 cmci_clear();
1798}
1799
1800static void mce_enable_ce(void *all)
1801{
1802 if (!mce_available(&current_cpu_data))
1803 return;
1804 cmci_reenable();
1805 cmci_recheck();
1806 if (all)
Borislav Petkov5e099542009-10-16 12:31:32 +02001807 __mcheck_cpu_init_timer();
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001808}
1809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810static struct sysdev_class mce_sysclass = {
Ingo Molnare9eee032009-04-08 12:31:17 +02001811 .suspend = mce_suspend,
1812 .shutdown = mce_shutdown,
1813 .resume = mce_resume,
1814 .name = "machinecheck",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815};
1816
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001817DEFINE_PER_CPU(struct sys_device, mce_dev);
Ingo Molnare9eee032009-04-08 12:31:17 +02001818
1819__cpuinitdata
1820void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
Andi Kleencebe1822009-07-09 00:31:43 +02001822static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
1823{
1824 return container_of(attr, struct mce_bank, attr);
1825}
Andi Kleen0d7482e32009-02-17 23:07:13 +01001826
1827static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
1828 char *buf)
1829{
Andi Kleencebe1822009-07-09 00:31:43 +02001830 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
Andi Kleen0d7482e32009-02-17 23:07:13 +01001831}
1832
1833static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09001834 const char *buf, size_t size)
Andi Kleen0d7482e32009-02-17 23:07:13 +01001835{
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09001836 u64 new;
Ingo Molnare9eee032009-04-08 12:31:17 +02001837
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09001838 if (strict_strtoull(buf, 0, &new) < 0)
Andi Kleen0d7482e32009-02-17 23:07:13 +01001839 return -EINVAL;
Ingo Molnare9eee032009-04-08 12:31:17 +02001840
Andi Kleencebe1822009-07-09 00:31:43 +02001841 attr_to_bank(attr)->ctl = new;
Andi Kleen0d7482e32009-02-17 23:07:13 +01001842 mce_restart();
Ingo Molnare9eee032009-04-08 12:31:17 +02001843
Hidetoshi Seto9319cec2009-04-14 17:26:30 +09001844 return size;
Andi Kleen0d7482e32009-02-17 23:07:13 +01001845}
Andi Kleena98f0dd2007-02-13 13:26:23 +01001846
Ingo Molnare9eee032009-04-08 12:31:17 +02001847static ssize_t
1848show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
Andi Kleena98f0dd2007-02-13 13:26:23 +01001849{
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09001850 strcpy(buf, mce_helper);
Andi Kleena98f0dd2007-02-13 13:26:23 +01001851 strcat(buf, "\n");
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09001852 return strlen(mce_helper) + 1;
Andi Kleena98f0dd2007-02-13 13:26:23 +01001853}
1854
Andi Kleen4a0b2b42008-07-01 18:48:41 +02001855static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
Ingo Molnare9eee032009-04-08 12:31:17 +02001856 const char *buf, size_t siz)
Andi Kleena98f0dd2007-02-13 13:26:23 +01001857{
1858 char *p;
Ingo Molnare9eee032009-04-08 12:31:17 +02001859
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09001860 strncpy(mce_helper, buf, sizeof(mce_helper));
1861 mce_helper[sizeof(mce_helper)-1] = 0;
Hidetoshi Seto1020bcb2009-06-15 17:20:57 +09001862 p = strchr(mce_helper, '\n');
Ingo Molnare9eee032009-04-08 12:31:17 +02001863
Jan Beuliche9084ec2009-07-16 09:45:11 +01001864 if (p)
Ingo Molnare9eee032009-04-08 12:31:17 +02001865 *p = 0;
1866
Jan Beuliche9084ec2009-07-16 09:45:11 +01001867 return strlen(mce_helper) + !!p;
Andi Kleena98f0dd2007-02-13 13:26:23 +01001868}
1869
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001870static ssize_t set_ignore_ce(struct sys_device *s,
1871 struct sysdev_attribute *attr,
1872 const char *buf, size_t size)
1873{
1874 u64 new;
1875
1876 if (strict_strtoull(buf, 0, &new) < 0)
1877 return -EINVAL;
1878
1879 if (mce_ignore_ce ^ !!new) {
1880 if (new) {
1881 /* disable ce features */
1882 on_each_cpu(mce_disable_ce, (void *)1, 1);
1883 mce_ignore_ce = 1;
1884 } else {
1885 /* enable ce features */
1886 mce_ignore_ce = 0;
1887 on_each_cpu(mce_enable_ce, (void *)1, 1);
1888 }
1889 }
1890 return size;
1891}
1892
1893static ssize_t set_cmci_disabled(struct sys_device *s,
1894 struct sysdev_attribute *attr,
1895 const char *buf, size_t size)
1896{
1897 u64 new;
1898
1899 if (strict_strtoull(buf, 0, &new) < 0)
1900 return -EINVAL;
1901
1902 if (mce_cmci_disabled ^ !!new) {
1903 if (new) {
1904 /* disable cmci */
1905 on_each_cpu(mce_disable_ce, NULL, 1);
1906 mce_cmci_disabled = 1;
1907 } else {
1908 /* enable cmci */
1909 mce_cmci_disabled = 0;
1910 on_each_cpu(mce_enable_ce, NULL, 1);
1911 }
1912 }
1913 return size;
1914}
1915
Andi Kleenb56f6422009-05-27 21:56:52 +02001916static ssize_t store_int_with_restart(struct sys_device *s,
1917 struct sysdev_attribute *attr,
1918 const char *buf, size_t size)
1919{
1920 ssize_t ret = sysdev_store_int(s, attr, buf, size);
1921 mce_restart();
1922 return ret;
1923}
1924
Andi Kleena98f0dd2007-02-13 13:26:23 +01001925static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
Andi Kleend95d62c2008-07-01 18:48:43 +02001926static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
Andi Kleen3c079792009-05-27 21:56:55 +02001927static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001928static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
Ingo Molnare9eee032009-04-08 12:31:17 +02001929
Andi Kleenb56f6422009-05-27 21:56:52 +02001930static struct sysdev_ext_attribute attr_check_interval = {
1931 _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
1932 store_int_with_restart),
1933 &check_interval
1934};
Ingo Molnare9eee032009-04-08 12:31:17 +02001935
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001936static struct sysdev_ext_attribute attr_ignore_ce = {
1937 _SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
1938 &mce_ignore_ce
1939};
1940
1941static struct sysdev_ext_attribute attr_cmci_disabled = {
Yinghai Lu74b602c2009-06-17 14:43:32 -07001942 _SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001943 &mce_cmci_disabled
1944};
1945
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001946static struct sysdev_attribute *mce_attrs[] = {
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001947 &attr_tolerant.attr,
1948 &attr_check_interval.attr,
1949 &attr_trigger,
Andi Kleen3c079792009-05-27 21:56:55 +02001950 &attr_monarch_timeout.attr,
Hidetoshi Seto9af43b52009-06-15 17:21:36 +09001951 &attr_dont_log_ce.attr,
1952 &attr_ignore_ce.attr,
1953 &attr_cmci_disabled.attr,
Andi Kleena98f0dd2007-02-13 13:26:23 +01001954 NULL
1955};
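
/*
 * These attributes end up under the per-CPU machinecheck sysdev.
 * Illustrative paths, assuming the usual sysdev layout for the
 * "machinecheck" class registered above:
 *
 *	/sys/devices/system/machinecheck/machinecheck0/check_interval
 *	/sys/devices/system/machinecheck/machinecheck0/tolerant
 *	/sys/devices/system/machinecheck/machinecheck0/bank0
 *
 * Writing check_interval or a bankN control triggers mce_restart(), so
 * for example "echo 0 > .../check_interval" stops the polling timer on
 * all CPUs after the restart.
 */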
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001957static cpumask_var_t mce_dev_initialized;
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08001958
Ingo Molnare9eee032009-04-08 12:31:17 +02001959/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
Andi Kleen91c6d402005-07-28 21:15:39 -07001960static __cpuinit int mce_create_device(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961{
1962 int err;
Hidetoshi Setob1f49f92009-06-18 14:53:24 +09001963 int i, j;
Mike Travis92cb7612007-10-19 20:35:04 +02001964
Andreas Herrmann90367552007-11-07 02:12:58 +01001965 if (!mce_available(&boot_cpu_data))
Andi Kleen91c6d402005-07-28 21:15:39 -07001966 return -EIO;
1967
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001968 memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
1969 per_cpu(mce_dev, cpu).id = cpu;
1970 per_cpu(mce_dev, cpu).cls = &mce_sysclass;
Andi Kleen91c6d402005-07-28 21:15:39 -07001971
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001972 err = sysdev_register(&per_cpu(mce_dev, cpu));
Akinobu Mitad435d862007-10-18 03:05:15 -07001973 if (err)
1974 return err;
Andi Kleen91c6d402005-07-28 21:15:39 -07001975
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001976 for (i = 0; mce_attrs[i]; i++) {
1977 err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
Akinobu Mitad435d862007-10-18 03:05:15 -07001978 if (err)
1979 goto error;
Andi Kleen91c6d402005-07-28 21:15:39 -07001980 }
Hidetoshi Setob1f49f92009-06-18 14:53:24 +09001981 for (j = 0; j < banks; j++) {
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001982 err = sysdev_create_file(&per_cpu(mce_dev, cpu),
Andi Kleencebe1822009-07-09 00:31:43 +02001983 &mce_banks[j].attr);
Andi Kleen0d7482e32009-02-17 23:07:13 +01001984 if (err)
1985 goto error2;
1986 }
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001987 cpumask_set_cpu(cpu, mce_dev_initialized);
Akinobu Mitad435d862007-10-18 03:05:15 -07001988
1989 return 0;
Andi Kleen0d7482e32009-02-17 23:07:13 +01001990error2:
Hidetoshi Setob1f49f92009-06-18 14:53:24 +09001991 while (--j >= 0)
Andi Kleencebe1822009-07-09 00:31:43 +02001992 sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr);
Akinobu Mitad435d862007-10-18 03:05:15 -07001993error:
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001994 while (--i >= 0)
Hidetoshi Seto5c0e9f22009-12-08 16:52:44 +09001995 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
Ingo Molnarcb491fc2009-04-08 12:31:17 +02001996
1997 sysdev_unregister(&per_cpu(mce_dev, cpu));
Akinobu Mitad435d862007-10-18 03:05:15 -07001998
Andi Kleen91c6d402005-07-28 21:15:39 -07001999 return err;
2000}
2001
Jan Beulich2d9cd6c2008-08-29 13:15:04 +01002002static __cpuinit void mce_remove_device(unsigned int cpu)
Andi Kleen91c6d402005-07-28 21:15:39 -07002003{
Shaohua Li73ca5352006-01-11 22:43:06 +01002004 int i;
2005
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002006 if (!cpumask_test_cpu(cpu, mce_dev_initialized))
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08002007 return;
2008
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002009 for (i = 0; mce_attrs[i]; i++)
2010 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
2011
Andi Kleen0d7482e32009-02-17 23:07:13 +01002012 for (i = 0; i < banks; i++)
Andi Kleencebe1822009-07-09 00:31:43 +02002013 sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr);
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002014
2015 sysdev_unregister(&per_cpu(mce_dev, cpu));
2016 cpumask_clear_cpu(cpu, mce_dev_initialized);
Andi Kleen91c6d402005-07-28 21:15:39 -07002017}
Andi Kleen91c6d402005-07-28 21:15:39 -07002018
Andi Kleend6b75582009-02-12 13:39:31 +01002019/* Make sure there are no machine checks on offlined CPUs. */
Hidetoshi Seto767df1b2009-11-26 17:29:02 +09002020static void __cpuinit mce_disable_cpu(void *h)
Andi Kleend6b75582009-02-12 13:39:31 +01002021{
Andi Kleen88ccbed2009-02-12 13:49:36 +01002022 unsigned long action = *(unsigned long *)h;
Ingo Molnarcb491fc2009-04-08 12:31:17 +02002023 int i;
Andi Kleend6b75582009-02-12 13:39:31 +01002024
2025 if (!mce_available(&current_cpu_data))
2026 return;
Hidetoshi Seto767df1b2009-11-26 17:29:02 +09002027
Andi Kleen88ccbed2009-02-12 13:49:36 +01002028 if (!(action & CPU_TASKS_FROZEN))
2029 cmci_clear();
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002030 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02002031 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02002032
Andi Kleencebe1822009-07-09 00:31:43 +02002033 if (b->init)
Andi Kleena2d32bc2009-07-09 00:31:44 +02002034 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002035 }
Andi Kleend6b75582009-02-12 13:39:31 +01002036}
2037
Hidetoshi Seto767df1b2009-11-26 17:29:02 +09002038static void __cpuinit mce_reenable_cpu(void *h)
Andi Kleend6b75582009-02-12 13:39:31 +01002039{
Andi Kleen88ccbed2009-02-12 13:49:36 +01002040 unsigned long action = *(unsigned long *)h;
Ingo Molnare9eee032009-04-08 12:31:17 +02002041 int i;
Andi Kleend6b75582009-02-12 13:39:31 +01002042
2043 if (!mce_available(&current_cpu_data))
2044 return;
Ingo Molnare9eee032009-04-08 12:31:17 +02002045
Andi Kleen88ccbed2009-02-12 13:49:36 +01002046 if (!(action & CPU_TASKS_FROZEN))
2047 cmci_reenable();
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002048 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02002049 struct mce_bank *b = &mce_banks[i];
Ingo Molnar11868a22009-09-23 17:49:55 +02002050
Andi Kleencebe1822009-07-09 00:31:43 +02002051 if (b->init)
Andi Kleena2d32bc2009-07-09 00:31:44 +02002052 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
Andi Kleen06b7a7a2009-04-27 18:37:43 +02002053 }
Andi Kleend6b75582009-02-12 13:39:31 +01002054}
2055
Andi Kleen91c6d402005-07-28 21:15:39 -07002056/* Get notified when a cpu comes on/off. Be hotplug friendly. */
Ingo Molnare9eee032009-04-08 12:31:17 +02002057static int __cpuinit
2058mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
Andi Kleen91c6d402005-07-28 21:15:39 -07002059{
2060 unsigned int cpu = (unsigned long)hcpu;
Andi Kleen52d168e2009-02-12 13:39:29 +01002061 struct timer_list *t = &per_cpu(mce_timer, cpu);
Andi Kleen91c6d402005-07-28 21:15:39 -07002062
2063 switch (action) {
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08002064 case CPU_ONLINE:
2065 case CPU_ONLINE_FROZEN:
2066 mce_create_device(cpu);
Rafael J. Wysocki87357282008-08-22 22:23:09 +02002067 if (threshold_cpu_callback)
2068 threshold_cpu_callback(action, cpu);
Andi Kleen91c6d402005-07-28 21:15:39 -07002069 break;
Andi Kleen91c6d402005-07-28 21:15:39 -07002070 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07002071 case CPU_DEAD_FROZEN:
Rafael J. Wysocki87357282008-08-22 22:23:09 +02002072 if (threshold_cpu_callback)
2073 threshold_cpu_callback(action, cpu);
Andi Kleen91c6d402005-07-28 21:15:39 -07002074 mce_remove_device(cpu);
2075 break;
Andi Kleen52d168e2009-02-12 13:39:29 +01002076 case CPU_DOWN_PREPARE:
2077 case CPU_DOWN_PREPARE_FROZEN:
2078 del_timer_sync(t);
Andi Kleen88ccbed2009-02-12 13:49:36 +01002079 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
Andi Kleen52d168e2009-02-12 13:39:29 +01002080 break;
2081 case CPU_DOWN_FAILED:
2082 case CPU_DOWN_FAILED_FROZEN:
Hidetoshi Setofe5ed912009-12-03 11:33:08 +09002083 if (!mce_ignore_ce && check_interval) {
2084 t->expires = round_jiffies(jiffies +
Tejun Heo245b2e72009-06-24 15:13:48 +09002085 __get_cpu_var(mce_next_interval));
Hidetoshi Setofe5ed912009-12-03 11:33:08 +09002086 add_timer_on(t, cpu);
2087 }
Andi Kleen88ccbed2009-02-12 13:49:36 +01002088 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2089 break;
2090 case CPU_POST_DEAD:
2091 /* intentionally ignoring frozen here */
2092 cmci_rediscover(cpu);
Andi Kleen52d168e2009-02-12 13:39:29 +01002093 break;
Andi Kleen91c6d402005-07-28 21:15:39 -07002094 }
Andreas Herrmannbae19fe2007-11-14 17:00:44 -08002095 return NOTIFY_OK;
Andi Kleen91c6d402005-07-28 21:15:39 -07002096}
2097
Sam Ravnborg1e356692008-01-30 13:33:36 +01002098static struct notifier_block mce_cpu_notifier __cpuinitdata = {
Andi Kleen91c6d402005-07-28 21:15:39 -07002099 .notifier_call = mce_cpu_callback,
2100};
2101
Andi Kleencebe1822009-07-09 00:31:43 +02002102static __init void mce_init_banks(void)
Andi Kleen0d7482e32009-02-17 23:07:13 +01002103{
2104 int i;
2105
Andi Kleen0d7482e32009-02-17 23:07:13 +01002106 for (i = 0; i < banks; i++) {
Andi Kleencebe1822009-07-09 00:31:43 +02002107 struct mce_bank *b = &mce_banks[i];
2108 struct sysdev_attribute *a = &b->attr;
Ingo Molnare9eee032009-04-08 12:31:17 +02002109
Eric W. Biedermana07e4152010-02-11 15:23:05 -08002110 sysfs_attr_init(&a->attr);
Andi Kleencebe1822009-07-09 00:31:43 +02002111 a->attr.name = b->attrname;
2112 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
Ingo Molnare9eee032009-04-08 12:31:17 +02002113
2114 a->attr.mode = 0644;
2115 a->show = show_bank;
2116 a->store = set_bank;
Andi Kleen0d7482e32009-02-17 23:07:13 +01002117 }
Andi Kleen0d7482e32009-02-17 23:07:13 +01002118}
2119
Borislav Petkov5e099542009-10-16 12:31:32 +02002120static __init int mcheck_init_device(void)
Andi Kleen91c6d402005-07-28 21:15:39 -07002121{
2122 int err;
2123 int i = 0;
2124
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 if (!mce_available(&boot_cpu_data))
2126 return -EIO;
Andi Kleen0d7482e32009-02-17 23:07:13 +01002127
Yinghai Lue92fae02009-06-17 16:21:33 -07002128 zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);
Rusty Russell996867d2009-03-13 14:49:51 +10302129
Andi Kleencebe1822009-07-09 00:31:43 +02002130 mce_init_banks();
Andi Kleen0d7482e32009-02-17 23:07:13 +01002131
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 err = sysdev_class_register(&mce_sysclass);
Akinobu Mitad435d862007-10-18 03:05:15 -07002133 if (err)
2134 return err;
Andi Kleen91c6d402005-07-28 21:15:39 -07002135
2136 for_each_online_cpu(i) {
Akinobu Mitad435d862007-10-18 03:05:15 -07002137 err = mce_create_device(i);
2138 if (err)
2139 return err;
Andi Kleen91c6d402005-07-28 21:15:39 -07002140 }
2141
Chandra Seetharamanbe6b5a32006-07-30 03:03:37 -07002142 register_hotcpu_notifier(&mce_cpu_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 misc_register(&mce_log_device);
Ingo Molnare9eee032009-04-08 12:31:17 +02002144
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146}
Andi Kleen91c6d402005-07-28 21:15:39 -07002147
Borislav Petkov5e099542009-10-16 12:31:32 +02002148device_initcall(mcheck_init_device);
Ingo Molnara988d332009-04-08 12:31:25 +02002149
Andi Kleend7c3c9a2009-04-28 23:07:25 +02002150/*
 2151 * Old-style boot option parsing. Only for compatibility.
2152 */
2153static int __init mcheck_disable(char *str)
2154{
2155 mce_disabled = 1;
2156 return 1;
2157}
2158__setup("nomce", mcheck_disable);
Huang Ying5be9ed22009-07-31 09:41:42 +08002159
2160#ifdef CONFIG_DEBUG_FS
2161struct dentry *mce_get_debugfs_dir(void)
2162{
2163 static struct dentry *dmce;
2164
2165 if (!dmce)
2166 dmce = debugfs_create_dir("mce", NULL);
2167
2168 return dmce;
2169}
Huang Yingbf783f92009-07-31 09:41:43 +08002170
2171static void mce_reset(void)
2172{
2173 cpu_missing = 0;
2174 atomic_set(&mce_fake_paniced, 0);
2175 atomic_set(&mce_executing, 0);
2176 atomic_set(&mce_callin, 0);
2177 atomic_set(&global_nwo, 0);
2178}
2179
2180static int fake_panic_get(void *data, u64 *val)
2181{
2182 *val = fake_panic;
2183 return 0;
2184}
2185
2186static int fake_panic_set(void *data, u64 val)
2187{
2188 mce_reset();
2189 fake_panic = val;
2190 return 0;
2191}
2192
2193DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2194 fake_panic_set, "%llu\n");
2195
Borislav Petkov5e099542009-10-16 12:31:32 +02002196static int __init mcheck_debugfs_init(void)
Huang Yingbf783f92009-07-31 09:41:43 +08002197{
2198 struct dentry *dmce, *ffake_panic;
2199
2200 dmce = mce_get_debugfs_dir();
2201 if (!dmce)
2202 return -ENOMEM;
2203 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2204 &fake_panic_fops);
2205 if (!ffake_panic)
2206 return -ENOMEM;
2207
2208 return 0;
2209}
Borislav Petkov5e099542009-10-16 12:31:32 +02002210late_initcall(mcheck_debugfs_init);
Huang Ying5be9ed22009-07-31 09:41:42 +08002211#endif