/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

#define rcu_dereference_check_mce(p) \
	rcu_dereference_index_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_chrdev_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

/* User mode helper program triggered by machine check event */
static unsigned long	mce_need_notify;
static char		mce_helper[128];
static char		*mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int		cpu_missing;

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It
 * also keeps MCEs separate from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;
	int ret = 0;

	/* Emit the trace record: */
	trace_mce_record(mce);

	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
	if (ret == NOTIFY_STOP)
		return;

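	/*
	 * Two-phase append: reserve a slot by advancing mcelog.next with
	 * cmpxchg, copy the record in, and set ->finished last so that
	 * readers never observe a half-written entry.
	 */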
	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up, discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old leftover entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

static void drain_mcelog_buffer(void)
{
	unsigned int next, i, prev = 0;

	next = ACCESS_ONCE(mcelog.next);

	do {
		struct mce *m;

		/* drain what was logged during boot */
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			unsigned retries = 1;

			m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2*retries))
					retries++;

				cpu_relax();

				if (!m->finished && retries >= 4) {
					pr_err("skipping error being logged currently!\n");
					break;
				}
			}
			smp_rmb();
			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
		}

		memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
		prev = next;
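		/*
		 * Try to reset mcelog.next to 0. If new records were logged
		 * meanwhile, the cmpxchg fails and we loop once more to
		 * drain the newly arrived range as well.
		 */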
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);
}

void mce_register_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
	drain_mcelog_buffer();
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error
	 * (if the CPU has an implementation for that):
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

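/* Map an MSR number to the offset of the struct mce field that mirrors it. */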
static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

/*
 * Simple lockless ring to communicate PFNs from the exception handler to the
 * process-context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

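/*
 * start is the consumer index, end the producer index; one slot is kept
 * empty so that a full ring can be told apart from an empty one.
 */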
struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
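	/* Make the PFN visible before publishing the new end index. */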
	wmb();
	r->end = next;
	return 0;
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty())
		schedule_work(&__get_cpu_var(mce_work));
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&__get_cpu_var(mce_irq_work));
}

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In that case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}
		if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY)
			ret = 1;
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU initially spins until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (mca_cfg.tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure that all CPUs' errors are always examined.
 *
 * This also detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

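/* Number of CPUs that saw a locally fatal ("no way out") condition in mce_start(). */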
static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

| 945 | /* |
Tony Luck | af104e3 | 2011-12-14 15:55:20 -0800 | [diff] [blame] | 946 | * Need to save faulting physical address associated with a process |
| 947 | * in the machine check handler some place where we can grab it back |
| 948 | * later in mce_notify_process() |
| 949 | */ |
| 950 | #define MCE_INFO_MAX 16 |
| 951 | |
| 952 | struct mce_info { |
| 953 | atomic_t inuse; |
| 954 | struct task_struct *t; |
| 955 | __u64 paddr; |
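	/* Whether execution can be restarted at the faulting IP (RIPV was set). */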
	int			restartable;
} mce_info[MCE_INFO_MAX];

static void mce_save_info(__u64 addr, int c)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
		if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
			mi->t = current;
			mi->paddr = addr;
			mi->restartable = c;
			return;
		}
	}

	mce_panic("Too many concurrent recoverable errors", NULL, NULL);
}

static struct mce_info *mce_find_info(void)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
		if (atomic_read(&mi->inuse) && mi->t == current)
			return mi;
	return NULL;
}

static void mce_clear_info(struct mce_info *mi)
{
	atomic_set(&mi->inuse, 0);
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	this_cpu_inc(mce_exception_count);

	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	final = &__get_cpu_var(mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1045 | /* |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1046 | * When no restart IP might need to kill or panic. |
| 1047 | * Assume the worst for now, but if we find the |
| 1048 | * severity is MCE_AR_SEVERITY we have other options. |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1049 | */ |
| 1050 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) |
| 1051 | kill_it = 1; |
| 1052 | |
| 1053 | /* |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1054 | * Go through all the banks in exclusion of the other CPUs. |
| 1055 | * This way we don't report duplicated events on shared banks |
| 1056 | * because the first one to see it will clear it. |
| 1057 | */ |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 1058 | order = mce_start(&no_way_out); |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1059 | for (i = 0; i < cfg->banks; i++) { |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1060 | __clear_bit(i, toclear); |
Tony Luck | 95022b8 | 2012-04-18 15:19:40 -0700 | [diff] [blame] | 1061 | if (!test_bit(i, valid_banks)) |
| 1062 | continue; |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1063 | if (!mce_banks[i].ctl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | continue; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1065 | |
| 1066 | m.misc = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | m.addr = 0; |
| 1068 | m.bank = i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | |
Andi Kleen | a2d32bc | 2009-07-09 00:31:44 +0200 | [diff] [blame] | 1070 | m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | if ((m.status & MCI_STATUS_VAL) == 0) |
| 1072 | continue; |
| 1073 | |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1074 | /* |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1075 | * Non uncorrected or non signaled errors are handled by |
| 1076 | * machine_check_poll. Leave them alone, unless this panics. |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1077 | */ |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1078 | if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1079 | !no_way_out) |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1080 | continue; |
| 1081 | |
| 1082 | /* |
| 1083 | * Set taint even when machine check was not enabled. |
| 1084 | */ |
| 1085 | add_taint(TAINT_MACHINE_CHECK); |
| 1086 | |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1087 | severity = mce_severity(&m, cfg->tolerant, NULL); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1088 | |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1089 | /* |
| 1090 | * When machine check was for corrected handler don't touch, |
| 1091 | * unless we're panicing. |
| 1092 | */ |
| 1093 | if (severity == MCE_KEEP_SEVERITY && !no_way_out) |
| 1094 | continue; |
| 1095 | __set_bit(i, toclear); |
| 1096 | if (severity == MCE_NO_SEVERITY) { |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1097 | /* |
| 1098 | * Machine check event was not enabled. Clear, but |
| 1099 | * ignore. |
| 1100 | */ |
| 1101 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | } |
| 1103 | |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 1104 | mce_read_aux(&m, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1106 | /* |
| 1107 | * Action optional error. Queue address for later processing. |
| 1108 | * When the ring overflows we just ignore the AO error. |
| 1109 | * RED-PEN add some logging mechanism when |
| 1110 | * usable_address or mce_add_ring fails. |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1111 | * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0 |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1112 | */ |
| 1113 | if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) |
| 1114 | mce_ring_add(m.addr >> PAGE_SHIFT); |
| 1115 | |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1116 | mce_log(&m); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1118 | if (severity > worst) { |
| 1119 | *final = m; |
| 1120 | worst = severity; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | } |
| 1123 | |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1124 | /* mce_clear_state will clear *final, save locally for use later */ |
| 1125 | m = *final; |
| 1126 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1127 | if (!no_way_out) |
| 1128 | mce_clear_state(toclear); |
| 1129 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1130 | /* |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1131 | * Do most of the synchronization with other CPUs. |
| 1132 | * When there's any problem use only local no_way_out state. |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1133 | */ |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1134 | if (mce_end(order) < 0) |
| 1135 | no_way_out = worst >= MCE_PANIC_SEVERITY; |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1136 | |
| 1137 | /* |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1138 | * At insane "tolerant" levels we take no action. Otherwise |
| 1139 | * we only die if we have no other choice. For less serious |
| 1140 | * issues we try to recover, or limit damage to the current |
| 1141 | * process. |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1142 | */ |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1143 | if (cfg->tolerant < 3) { |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1144 | if (no_way_out) |
| 1145 | mce_panic("Fatal machine check on current CPU", &m, msg); |
| 1146 | if (worst == MCE_AR_SEVERITY) { |
| 1147 | /* schedule action before return to userland */ |
Tony Luck | dad1743 | 2012-05-14 15:07:48 -0700 | [diff] [blame] | 1148 | mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV); |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1149 | set_thread_flag(TIF_MCE_NOTIFY); |
| 1150 | } else if (kill_it) { |
| 1151 | force_sig(SIGBUS, current); |
| 1152 | } |
| 1153 | } |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1154 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1155 | if (worst > 0) |
| 1156 | mce_report_event(regs); |
Andi Kleen | 5f8c1a5 | 2009-04-29 19:29:12 +0200 | [diff] [blame] | 1157 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); |
Andi Kleen | 3256169 | 2009-05-27 21:56:53 +0200 | [diff] [blame] | 1158 | out: |
Andi Kleen | 553f265 | 2006-04-07 19:49:57 +0200 | [diff] [blame] | 1159 | atomic_dec(&mce_entry); |
Andi Kleen | 88921be | 2009-05-27 21:56:51 +0200 | [diff] [blame] | 1160 | sync_core(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | } |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 1162 | EXPORT_SYMBOL_GPL(do_machine_check); |
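
/*
 * Worked example of the exit policy above (illustrative, not normative):
 *
 *	tolerant=1, RIPV clear, worst == MCE_AR_SEVERITY
 *		kill_it was set, but the AR branch wins: the physical
 *		address is stashed via mce_save_info() and TIF_MCE_NOTIFY
 *		defers recovery to mce_notify_process() on the way back
 *		to userspace.
 *	tolerant=1, no_way_out=1
 *		mce_panic() runs immediately; nothing is deferred.
 *	tolerant=3
 *		No action at all; the event is only logged.
 */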

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Called in the process context that was interrupted by the MCE and
 * marked with TIF_MCE_NOTIFY, just before returning to the erroneous
 * userland. This code is allowed to sleep.
 * Attempt possible recovery such as calling the high level VM handler to
 * process any corrupted pages, and kill/signal the current process if
 * required. Action required errors are handled here.
 */
void mce_notify_process(void)
{
	unsigned long pfn;
	struct mce_info *mi = mce_find_info();
	int flags = MF_ACTION_REQUIRED;

	if (!mi)
		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
	pfn = mi->paddr >> PAGE_SHIFT;

	clear_thread_flag(TIF_MCE_NOTIFY);

	pr_err("Uncorrected hardware memory error in user-access at %llx",
	       mi->paddr);
	/*
	 * We must call memory_failure() here even if the current process is
	 * doomed. We still need to mark the page as poisoned and alert any
	 * other users of the page.
	 */
	if (!mi->restartable)
		flags |= MF_MUST_KILL;
	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
		pr_err("Memory error not recovered");
		force_sig(SIGBUS, current);
	}
	mce_clear_info(mi);
}

/*
 * Action optional processing happens here (picking up
 * from the list of faulting pages that do_machine_check()
 * placed into the "ring").
 */
static void mce_process_work(struct work_struct *dummy)
{
	unsigned long pfn;

	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR, 0);
}
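
/*
 * End-to-end sketch of the action-optional path, pieced together from
 * this file (illustrative only):
 *
 *	do_machine_check()
 *	    mce_ring_add(m.addr >> PAGE_SHIFT);	// NMI-safe queueing
 *	    mce_report_event(regs);		// kicks deferred work
 *	... later, in process context ...
 *	mce_process_work()
 *	    memory_failure(pfn, MCE_VECTOR, 0);	// flags == 0: best effort
 *
 * Unlike the action-required path above, no MF_ACTION_REQUIRED or
 * MF_MUST_KILL flag is passed, since nobody has consumed the poisoned
 * data yet.
 */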

#ifdef CONFIG_X86_MCE_INTEL
/***
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) MSR.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
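
/*
 * Sketch of the expected caller, which lives in the thermal interrupt
 * code, not in this file (illustrative; the exact call site and MSR
 * constant name are assumptions):
 *
 *	u64 msr_val;
 *
 *	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 *	if (event_worth_logging)
 *		mce_log_therm_throt_event(msr_val);
 */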

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) =
	mce_adjust_timer_default;

static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned long iv;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(__this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP,
				&__get_cpu_var(mce_poll_banks));
		mce_intel_cmci_poll();
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	iv = __this_cpu_read(mce_next_interval);
	if (mce_notify_irq()) {
		iv = max(iv / 2, (unsigned long) HZ/100);
	} else {
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
		iv = mce_adjust_timer(iv);
	}
	__this_cpu_write(mce_next_interval, iv);
	/* Might have become 0 after CMCI storm subsided */
	if (iv) {
		t->expires = jiffies + iv;
		add_timer_on(t, smp_processor_id());
	}
}
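
/*
 * Worked example of the adaptive interval above, assuming HZ=1000 and
 * the default check_interval of 300 seconds:
 *
 *	start:         iv = 300000 jiffies (5 min)
 *	MCE logged:    iv = max(150000, 10) = 150000	(poll 2x faster)
 *	MCE logged:    iv = 75000			(2x faster again)
 *	nothing found: iv = min(150000, 300000)		(back off 2x)
 *
 * The floor is HZ/100 = 10 jiffies, the ceiling check_interval * HZ.
 */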

/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned long when = jiffies + interval;
	unsigned long iv = __this_cpu_read(mce_next_interval);

	if (timer_pending(t)) {
		if (time_before(when, t->expires))
			mod_timer_pinned(t, when);
	} else {
		t->expires = round_jiffies(when);
		add_timer_on(t, smp_processor_id());
	}
	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}
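
/*
 * Illustrative use (the real caller is the Intel CMCI storm code, not
 * this file): when a CMCI storm forces interrupts off, polling has to
 * take over quickly, so the storm handler would do something like
 *
 *	mce_timer_kick(CMCI_STORM_INTERVAL);
 *
 * CMCI_STORM_INTERVAL is an assumed name for whatever short interval the
 * caller picks; only the "fire sooner, never later" contract matters.
 */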

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		if (mce_helper[0])
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);
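
/*
 * Usage note (illustrative): mce_helper is the path userspace writes to
 * the "trigger" sysfs attribute exported elsewhere in this file, e.g.
 *
 *	echo /usr/local/bin/mce-hook > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * After that, every successful mce_notify_irq() schedules
 * mce_trigger_work, which execs the helper via call_usermodehelper().
 * The exact sysfs path is an assumption and may differ by kernel version.
 */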

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = true;

	return 0;
}
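
/*
 * Worked example of the MCG_CAP decoding above, for a hypothetical
 * cap value of 0x1090309:
 *
 *	cap & MCG_BANKCNT_MASK        = 9	-> nine banks
 *	cap & MCG_CTL_P   (bit 8)     set	-> MCG_CTL register exists
 *	cap & MCG_EXT_P   (bit 9)     set, and
 *	MCG_EXT_CNT(cap)  = (cap >> 16) & 0xff = 9
 *						-> accurate RIP via MCG_EIP
 *	cap & MCG_SER_P   (bit 24)    set	-> mca_cfg.ser = true
 *
 * The value is made up purely to exercise every branch above.
 */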

static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;
	int i;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
		          MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
		          MCACOD)) !=
		         (MCI_STATUS_UC|MCI_STATUS_EN|
		          MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
		          MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}

/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * Disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOSes around don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 val, hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			for (i = 0; i < ARRAY_SIZE(msrs); i++) {
				rdmsrl(msrs[i], val);

				/* CntP bit set? */
				if (val & BIT_64(62)) {
					val &= ~BIT_64(62);
					wrmsrl(msrs[i], val);
				}
			}

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * The SDM documents that on family 6 bank 0 should not be
		 * written because it aliases to another special BIOS
		 * controlled register.
		 * But it's not aliased anymore on model 0x1a+.
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
	}
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}

static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	}

	return 0;
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		mce_adjust_timer = mce_intel_adjust_timer;
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
	unsigned long iv = mce_adjust_timer(check_interval * HZ);

	__this_cpu_write(mce_next_interval, iv);

	if (mca_cfg.ignore_ce || !iv)
		return;

	t->expires = round_jiffies(jiffies + iv);
	add_timer_on(t, smp_processor_id());
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned int cpu = smp_processor_id();

	setup_timer(t, mce_timer_fn, cpu);
	mce_start_timer(cpu, t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = true;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}

/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		/*
		 * When ERST is disabled, mce_chrdev_read() should return
		 * "no record" instead of "no device."
		 */
		if (rc == -ENODEV)
			return 0;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * Ideally the record should be cleared only after it has been
	 * flushed to disk or sent over the network by /sbin/mcelog, but
	 * we have no interface to support that now, so just clear it
	 * here to avoid duplication.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}
| 1778 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1779 | static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf, |
| 1780 | size_t usize, loff_t *off) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1782 | char __user *buf = ubuf; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1783 | unsigned long *cpu_tsc; |
| 1784 | unsigned prev, next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | int i, err; |
| 1786 | |
Mike Travis | 6bca67f | 2008-07-18 18:11:27 -0700 | [diff] [blame] | 1787 | cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); |
Andi Kleen | f0de53b | 2005-04-16 15:25:10 -0700 | [diff] [blame] | 1788 | if (!cpu_tsc) |
| 1789 | return -ENOMEM; |
| 1790 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1791 | mutex_lock(&mce_chrdev_read_mutex); |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1792 | |
| 1793 | if (!mce_apei_read_done) { |
| 1794 | err = __mce_read_apei(&buf, usize); |
| 1795 | if (err || buf != ubuf) |
| 1796 | goto out; |
| 1797 | } |
| 1798 | |
Paul E. McKenney | f56e8a0 | 2010-03-05 15:03:27 -0800 | [diff] [blame] | 1799 | next = rcu_dereference_check_mce(mcelog.next); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | |
| 1801 | /* Only supports full reads right now */ |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1802 | err = -EINVAL; |
| 1803 | if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) |
| 1804 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1805 | |
| 1806 | err = 0; |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1807 | prev = 0; |
| 1808 | do { |
| 1809 | for (i = prev; i < next; i++) { |
| 1810 | unsigned long start = jiffies; |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1811 | struct mce *m = &mcelog.entry[i]; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1812 | |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1813 | while (!m->finished) { |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1814 | if (time_after_eq(jiffies, start + 2)) { |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1815 | memset(m, 0, sizeof(*m)); |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1816 | goto timeout; |
| 1817 | } |
| 1818 | cpu_relax(); |
Andi Kleen | 673242c | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 1819 | } |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1820 | smp_rmb(); |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1821 | err |= copy_to_user(buf, m, sizeof(*m)); |
| 1822 | buf += sizeof(*m); |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1823 | timeout: |
| 1824 | ; |
Andi Kleen | 673242c | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 1825 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1826 | |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1827 | memset(mcelog.entry + prev, 0, |
| 1828 | (next - prev) * sizeof(struct mce)); |
| 1829 | prev = next; |
| 1830 | next = cmpxchg(&mcelog.next, prev, 0); |
| 1831 | } while (next != prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1832 | |
Paul E. McKenney | b2b1866 | 2005-06-25 14:55:38 -0700 | [diff] [blame] | 1833 | synchronize_sched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1835 | /* |
| 1836 | * Collect entries that were still getting written before the |
| 1837 | * synchronize. |
| 1838 | */ |
Jens Axboe | 15c8b6c | 2008-05-09 09:39:44 +0200 | [diff] [blame] | 1839 | on_each_cpu(collect_tscs, cpu_tsc, 1); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1840 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1841 | for (i = next; i < MCE_LOG_LEN; i++) { |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1842 | struct mce *m = &mcelog.entry[i]; |
| 1843 | |
| 1844 | if (m->finished && m->tsc < cpu_tsc[m->cpu]) { |
| 1845 | err |= copy_to_user(buf, m, sizeof(*m)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1846 | smp_rmb(); |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1847 | buf += sizeof(*m); |
| 1848 | memset(m, 0, sizeof(*m)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1849 | } |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1850 | } |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1851 | |
| 1852 | if (err) |
| 1853 | err = -EFAULT; |
| 1854 | |
| 1855 | out: |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1856 | mutex_unlock(&mce_chrdev_read_mutex); |
Andi Kleen | f0de53b | 2005-04-16 15:25:10 -0700 | [diff] [blame] | 1857 | kfree(cpu_tsc); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1858 | |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1859 | return err ? err : buf - ubuf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1860 | } |
| 1861 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1862 | static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait) |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1863 | { |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1864 | poll_wait(file, &mce_chrdev_wait, wait); |
Paul E. McKenney | a4dd992 | 2011-04-01 07:15:14 -0700 | [diff] [blame] | 1865 | if (rcu_access_index(mcelog.next)) |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1866 | return POLLIN | POLLRDNORM; |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1867 | if (!mce_apei_read_done && apei_check_mce()) |
| 1868 | return POLLIN | POLLRDNORM; |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1869 | return 0; |
| 1870 | } |
| 1871 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1872 | static long mce_chrdev_ioctl(struct file *f, unsigned int cmd, |
| 1873 | unsigned long arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | { |
| 1875 | int __user *p = (int __user *)arg; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1876 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1877 | if (!capable(CAP_SYS_ADMIN)) |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1878 | return -EPERM; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1879 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1880 | switch (cmd) { |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1881 | case MCE_GET_RECORD_LEN: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | return put_user(sizeof(struct mce), p); |
| 1883 | case MCE_GET_LOG_LEN: |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1884 | return put_user(MCE_LOG_LEN, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1885 | case MCE_GETCLEAR_FLAGS: { |
| 1886 | unsigned flags; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1887 | |
| 1888 | do { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1889 | flags = mcelog.flags; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1890 | } while (cmpxchg(&mcelog.flags, flags, 0) != flags); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1891 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1892 | return put_user(flags, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1893 | } |
| 1894 | default: |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1895 | return -ENOTTY; |
| 1896 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | } |
| 1898 | |
Luck, Tony | 66f5ddf | 2011-11-03 11:46:47 -0700 | [diff] [blame] | 1899 | static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf, |
| 1900 | size_t usize, loff_t *off); |
| 1901 | |
| 1902 | void register_mce_write_callback(ssize_t (*fn)(struct file *filp, |
| 1903 | const char __user *ubuf, |
| 1904 | size_t usize, loff_t *off)) |
| 1905 | { |
| 1906 | mce_write = fn; |
| 1907 | } |
| 1908 | EXPORT_SYMBOL_GPL(register_mce_write_callback); |
| 1909 | |
| 1910 | ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf, |
| 1911 | size_t usize, loff_t *off) |
| 1912 | { |
| 1913 | if (mce_write) |
| 1914 | return mce_write(filp, ubuf, usize, off); |
| 1915 | else |
| 1916 | return -EINVAL; |
| 1917 | } |
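/*
 * Illustrative sketch (not part of this file): a module such as mce-inject
 * hooks writes to /dev/mcelog through register_mce_write_callback() above.
 * The callback body is a hypothetical stand-in; a real injector decodes a
 * struct mce from the user buffer and raises a simulated machine check.
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/mce.h>

static ssize_t demo_mce_write(struct file *filp, const char __user *ubuf,
			      size_t usize, loff_t *off)
{
	struct mce m;

	if (usize < sizeof(m))
		return -EINVAL;
	if (copy_from_user(&m, ubuf, sizeof(m)))
		return -EFAULT;
	/* a real injector would validate and raise the record here */
	return sizeof(m);
}

static int __init demo_init(void)
{
	register_mce_write_callback(demo_mce_write);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");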
| 1918 | |
| 1919 | static const struct file_operations mce_chrdev_ops = { |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1920 | .open = mce_chrdev_open, |
| 1921 | .release = mce_chrdev_release, |
| 1922 | .read = mce_chrdev_read, |
Luck, Tony | 66f5ddf | 2011-11-03 11:46:47 -0700 | [diff] [blame] | 1923 | .write = mce_chrdev_write, |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1924 | .poll = mce_chrdev_poll, |
| 1925 | .unlocked_ioctl = mce_chrdev_ioctl, |
| 1926 | .llseek = no_llseek, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1927 | }; |
| 1928 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1929 | static struct miscdevice mce_chrdev_device = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1930 | .minor = MISC_MCELOG_MINOR,
| 1931 | .name = "mcelog",
| 1932 | .fops = &mce_chrdev_ops,
| 1933 | }; |
| 1934 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1935 | /* |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1936 | * mce=off Disables machine check |
| 1937 | * mce=no_cmci Disables CMCI |
| 1938 | * mce=dont_log_ce Clears corrected events silently, no log created for CEs. |
| 1939 | * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1940 | * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) |
| 1941 | * monarchtimeout is how long to wait for other CPUs on machine |
| 1942 | * check, or 0 to not wait |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1943 | * mce=bootlog Log MCEs from before booting. Disabled by default on AMD. |
| 1944 | * mce=nobootlog Don't log MCEs from before booting. |
Naveen N. Rao | 450cc20 | 2012-09-27 10:08:00 -0700 | [diff] [blame] | 1945 | * mce=bios_cmci_threshold Don't program the CMCI threshold; keep the BIOS-set value
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1946 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1947 | static int __init mcheck_enable(char *str) |
| 1948 | { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1949 | struct mca_config *cfg = &mca_cfg; |
| 1950 | |
Bartlomiej Zolnierkiewicz | e3346fc | 2009-07-28 23:55:09 +0200 | [diff] [blame] | 1951 | if (*str == 0) { |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1952 | enable_p5_mce(); |
Bartlomiej Zolnierkiewicz | e3346fc | 2009-07-28 23:55:09 +0200 | [diff] [blame] | 1953 | return 1; |
| 1954 | } |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1955 | if (*str == '=') |
| 1956 | str++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1957 | if (!strcmp(str, "off")) |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1958 | cfg->disabled = true; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1959 | else if (!strcmp(str, "no_cmci")) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1960 | cfg->cmci_disabled = true; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1961 | else if (!strcmp(str, "dont_log_ce")) |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1962 | cfg->dont_log_ce = true; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1963 | else if (!strcmp(str, "ignore_ce")) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1964 | cfg->ignore_ce = true; |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1965 | else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1966 | cfg->bootlog = (str[0] == 'b'); |
Naveen N. Rao | 450cc20 | 2012-09-27 10:08:00 -0700 | [diff] [blame] | 1967 | else if (!strcmp(str, "bios_cmci_threshold")) |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1968 | cfg->bios_cmci_threshold = true; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1969 | else if (isdigit(str[0])) { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1970 | get_option(&str, &(cfg->tolerant)); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1971 | if (*str == ',') { |
| 1972 | ++str; |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1973 | get_option(&str, &(cfg->monarch_timeout)); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1974 | } |
| 1975 | } else { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1976 | pr_info("mce argument %s ignored. Please use /sys\n", str); |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1977 | return 0; |
| 1978 | } |
OGAWA Hirofumi | 9b41046 | 2006-03-31 02:30:33 -0800 | [diff] [blame] | 1979 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1980 | } |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1981 | __setup("mce", mcheck_enable); |
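/*
 * Worked examples of what the parser above accepts on the kernel command
 * line:
 *
 *   mce                  empty argument: just enable_p5_mce()
 *   mce=off              cfg->disabled = true
 *   mce=no_cmci          cfg->cmci_disabled = true
 *   mce=2,500000000      cfg->tolerant = 2, cfg->monarch_timeout = 500000000
 *   mce=nobootlog        cfg->bootlog = 0 (str[0] != 'b')
 *
 * Anything unrecognized logs the pr_info() above and returns 0.
 */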
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1982 | |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 1983 | int __init mcheck_init(void) |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 1984 | { |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 1985 | mcheck_intel_therm_init(); |
| 1986 | |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 1987 | return 0; |
| 1988 | } |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 1989 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1990 | /* |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 1991 | * mce_syscore: PM support |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1992 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1993 | |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 1994 | /* |
| 1995 | * Disable machine checks on suspend and shutdown. We can't really handle |
| 1996 | * them later. |
| 1997 | */ |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1998 | static int mce_disable_error_reporting(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 1999 | { |
| 2000 | int i; |
| 2001 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2002 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2003 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2004 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2005 | if (b->init) |
Andi Kleen | a2d32bc | 2009-07-09 00:31:44 +0200 | [diff] [blame] | 2006 | wrmsrl(MSR_IA32_MCx_CTL(i), 0); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 2007 | } |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2008 | return 0; |
| 2009 | } |
| 2010 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2011 | static int mce_syscore_suspend(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2012 | { |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2013 | return mce_disable_error_reporting(); |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2014 | } |
| 2015 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2016 | static void mce_syscore_shutdown(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2017 | { |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2018 | mce_disable_error_reporting(); |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2019 | } |
| 2020 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2021 | /* |
| 2022 | * On resume clear all MCE state. Don't want to see leftovers from the BIOS. |
| 2023 | * Only one CPU is active at this time, the others get re-added later using |
| 2024 | * CPU hotplug: |
| 2025 | */ |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2026 | static void mce_syscore_resume(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2027 | { |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2028 | __mcheck_cpu_init_generic(); |
Tejun Heo | 7b543a5 | 2010-12-18 16:30:05 +0100 | [diff] [blame] | 2029 | __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2030 | } |
| 2031 | |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2032 | static struct syscore_ops mce_syscore_ops = { |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2033 | .suspend = mce_syscore_suspend, |
| 2034 | .shutdown = mce_syscore_shutdown, |
| 2035 | .resume = mce_syscore_resume, |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2036 | }; |
| 2037 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2038 | /* |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2039 | * mce_device: Sysfs support |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2040 | */ |
| 2041 | |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2042 | static void mce_cpu_restart(void *data) |
| 2043 | { |
Tejun Heo | 7b543a5 | 2010-12-18 16:30:05 +0100 | [diff] [blame] | 2044 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 33edbf0 | 2009-06-15 17:18:45 +0900 | [diff] [blame] | 2045 | return; |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2046 | __mcheck_cpu_init_generic(); |
| 2047 | __mcheck_cpu_init_timer(); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2048 | } |
| 2049 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2050 | /* Reinit MCEs after user configuration changes */ |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2051 | static void mce_restart(void) |
| 2052 | { |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2053 | mce_timer_delete_all(); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2054 | on_each_cpu(mce_cpu_restart, NULL, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2055 | } |
| 2056 | |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2057 | /* Toggle features for corrected errors */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2058 | static void mce_disable_cmci(void *data) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2059 | { |
Tejun Heo | 7b543a5 | 2010-12-18 16:30:05 +0100 | [diff] [blame] | 2060 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2061 | return; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2062 | cmci_clear(); |
| 2063 | } |
| 2064 | |
| 2065 | static void mce_enable_ce(void *all) |
| 2066 | { |
Tejun Heo | 7b543a5 | 2010-12-18 16:30:05 +0100 | [diff] [blame] | 2067 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2068 | return; |
| 2069 | cmci_reenable(); |
| 2070 | cmci_recheck(); |
| 2071 | if (all) |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2072 | __mcheck_cpu_init_timer(); |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2073 | } |
| 2074 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2075 | static struct bus_type mce_subsys = { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2076 | .name = "machinecheck", |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2077 | .dev_name = "machinecheck", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | }; |
| 2079 | |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2080 | DEFINE_PER_CPU(struct device *, mce_device); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2081 | |
| 2082 | __cpuinitdata |
| 2083 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2084 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2085 | static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2086 | { |
| 2087 | return container_of(attr, struct mce_bank, attr); |
| 2088 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2089 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2090 | static ssize_t show_bank(struct device *s, struct device_attribute *attr, |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2091 | char *buf) |
| 2092 | { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2093 | return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2094 | } |
| 2095 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2096 | static ssize_t set_bank(struct device *s, struct device_attribute *attr, |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2097 | const char *buf, size_t size) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2098 | { |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2099 | u64 new; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2100 | |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2101 | if (strict_strtoull(buf, 0, &new) < 0) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2102 | return -EINVAL; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2103 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2104 | attr_to_bank(attr)->ctl = new; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2105 | mce_restart(); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2106 | |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2107 | return size; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2108 | } |
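/*
 * Illustrative sketch (not part of this file): reading and rewriting a
 * per-bank control value from userspace. The path follows from the
 * "machinecheck" subsystem registered below and the "bank%d" attribute
 * names set up in mce_init_banks(); the written value lands in
 * mce_banks[0].ctl and mce_restart() reprograms the MSRs on every CPU.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long ctl;
	FILE *f = fopen("/sys/devices/system/machinecheck/machinecheck0/bank0", "r+");

	if (!f)
		return 1;
	if (fscanf(f, "%llx", &ctl) != 1)	/* show_bank() prints %llx */
		return 1;
	printf("bank0 ctl = %llx\n", ctl);

	rewind(f);
	fprintf(f, "%#llx\n", ctl);		/* parsed by set_bank() */
	fclose(f);
	return 0;
}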
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2109 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2110 | static ssize_t |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2111 | show_trigger(struct device *s, struct device_attribute *attr, char *buf) |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2112 | { |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2113 | strcpy(buf, mce_helper); |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2114 | strcat(buf, "\n"); |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2115 | return strlen(mce_helper) + 1; |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2116 | } |
| 2117 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2118 | static ssize_t set_trigger(struct device *s, struct device_attribute *attr, |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2119 | const char *buf, size_t siz) |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2120 | { |
| 2121 | char *p; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2122 | |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2123 | strncpy(mce_helper, buf, sizeof(mce_helper)); |
| 2124 | mce_helper[sizeof(mce_helper)-1] = 0; |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2125 | p = strchr(mce_helper, '\n'); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2126 | |
Jan Beulich | e9084ec | 2009-07-16 09:45:11 +0100 | [diff] [blame] | 2127 | if (p) |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2128 | *p = 0; |
| 2129 | |
Jan Beulich | e9084ec | 2009-07-16 09:45:11 +0100 | [diff] [blame] | 2130 | return strlen(mce_helper) + !!p; |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2131 | } |
| 2132 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2133 | static ssize_t set_ignore_ce(struct device *s, |
| 2134 | struct device_attribute *attr, |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2135 | const char *buf, size_t size) |
| 2136 | { |
| 2137 | u64 new; |
| 2138 | |
| 2139 | if (strict_strtoull(buf, 0, &new) < 0) |
| 2140 | return -EINVAL; |
| 2141 | |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2142 | if (mca_cfg.ignore_ce ^ !!new) { |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2143 | if (new) { |
| 2144 | /* disable ce features */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2145 | mce_timer_delete_all(); |
| 2146 | on_each_cpu(mce_disable_cmci, NULL, 1); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2147 | mca_cfg.ignore_ce = true; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2148 | } else { |
| 2149 | /* enable ce features */ |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2150 | mca_cfg.ignore_ce = false; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2151 | on_each_cpu(mce_enable_ce, (void *)1, 1); |
| 2152 | } |
| 2153 | } |
| 2154 | return size; |
| 2155 | } |
| 2156 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2157 | static ssize_t set_cmci_disabled(struct device *s, |
| 2158 | struct device_attribute *attr, |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2159 | const char *buf, size_t size) |
| 2160 | { |
| 2161 | u64 new; |
| 2162 | |
| 2163 | if (strict_strtoull(buf, 0, &new) < 0) |
| 2164 | return -EINVAL; |
| 2165 | |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2166 | if (mca_cfg.cmci_disabled ^ !!new) { |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2167 | if (new) { |
| 2168 | /* disable cmci */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2169 | on_each_cpu(mce_disable_cmci, NULL, 1); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2170 | mca_cfg.cmci_disabled = true; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2171 | } else { |
| 2172 | /* enable cmci */ |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2173 | mca_cfg.cmci_disabled = false; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2174 | on_each_cpu(mce_enable_ce, NULL, 1); |
| 2175 | } |
| 2176 | } |
| 2177 | return size; |
| 2178 | } |
| 2179 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2180 | static ssize_t store_int_with_restart(struct device *s, |
| 2181 | struct device_attribute *attr, |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2182 | const char *buf, size_t size) |
| 2183 | { |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2184 | ssize_t ret = device_store_int(s, attr, buf, size); |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2185 | mce_restart(); |
| 2186 | return ret; |
| 2187 | } |
| 2188 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2189 | static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2190 | static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 2191 | static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2192 | static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2193 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2194 | static struct dev_ext_attribute dev_attr_check_interval = { |
| 2195 | __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2196 | &check_interval |
| 2197 | }; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2198 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2199 | static struct dev_ext_attribute dev_attr_ignore_ce = { |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2200 | __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), |
| 2201 | &mca_cfg.ignore_ce |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2202 | }; |
| 2203 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2204 | static struct dev_ext_attribute dev_attr_cmci_disabled = { |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2205 | __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), |
| 2206 | &mca_cfg.cmci_disabled |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2207 | }; |
| 2208 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2209 | static struct device_attribute *mce_device_attrs[] = { |
| 2210 | &dev_attr_tolerant.attr, |
| 2211 | &dev_attr_check_interval.attr, |
| 2212 | &dev_attr_trigger, |
| 2213 | &dev_attr_monarch_timeout.attr, |
| 2214 | &dev_attr_dont_log_ce.attr, |
| 2215 | &dev_attr_ignore_ce.attr, |
| 2216 | &dev_attr_cmci_disabled.attr, |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2217 | NULL |
| 2218 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2219 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2220 | static cpumask_var_t mce_device_initialized; |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2221 | |
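/*
 * The driver core requires dynamically allocated devices to provide a
 * release() callback (it warns at unregister time otherwise), so the
 * per-CPU devices created below simply free themselves here.
 */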
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2222 | static void mce_device_release(struct device *dev) |
| 2223 | { |
| 2224 | kfree(dev); |
| 2225 | } |
| 2226 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2227 | /* Per-CPU device init. All of the CPUs still share the same bank ctl settings: */
| 2228 | static __cpuinit int mce_device_create(unsigned int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2229 | { |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2230 | struct device *dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2231 | int err; |
Hidetoshi Seto | b1f49f9 | 2009-06-18 14:53:24 +0900 | [diff] [blame] | 2232 | int i, j; |
Mike Travis | 92cb761 | 2007-10-19 20:35:04 +0200 | [diff] [blame] | 2233 | |
Andreas Herrmann | 9036755 | 2007-11-07 02:12:58 +0100 | [diff] [blame] | 2234 | if (!mce_available(&boot_cpu_data)) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2235 | return -EIO; |
| 2236 | |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2237 | dev = kzalloc(sizeof *dev, GFP_KERNEL); |
| 2238 | if (!dev) |
| 2239 | return -ENOMEM; |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2240 | dev->id = cpu; |
| 2241 | dev->bus = &mce_subsys; |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2242 | dev->release = &mce_device_release; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2243 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2244 | err = device_register(dev); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2245 | if (err) |
| 2246 | return err; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2247 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2248 | for (i = 0; mce_device_attrs[i]; i++) { |
| 2249 | err = device_create_file(dev, mce_device_attrs[i]); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2250 | if (err) |
| 2251 | goto error; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2252 | } |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2253 | for (j = 0; j < mca_cfg.banks; j++) { |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2254 | err = device_create_file(dev, &mce_banks[j].attr); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2255 | if (err) |
| 2256 | goto error2; |
| 2257 | } |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2258 | cpumask_set_cpu(cpu, mce_device_initialized); |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2259 | per_cpu(mce_device, cpu) = dev; |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2260 | |
| 2261 | return 0; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2262 | error2: /* unwind the bank files created before the failure */
Hidetoshi Seto | b1f49f9 | 2009-06-18 14:53:24 +0900 | [diff] [blame] | 2263 | while (--j >= 0) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2264 | device_remove_file(dev, &mce_banks[j].attr); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2265 | error: /* unwind the device attribute files created before the failure */
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2266 | while (--i >= 0) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2267 | device_remove_file(dev, mce_device_attrs[i]); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2268 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2269 | device_unregister(dev); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2270 | |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2271 | return err; |
| 2272 | } |
| 2273 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2274 | static __cpuinit void mce_device_remove(unsigned int cpu) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2275 | { |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2276 | struct device *dev = per_cpu(mce_device, cpu); |
Shaohua Li | 73ca535 | 2006-01-11 22:43:06 +0100 | [diff] [blame] | 2277 | int i; |
| 2278 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2279 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2280 | return; |
| 2281 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2282 | for (i = 0; mce_device_attrs[i]; i++) |
| 2283 | device_remove_file(dev, mce_device_attrs[i]); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2284 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2285 | for (i = 0; i < mca_cfg.banks; i++) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2286 | device_remove_file(dev, &mce_banks[i].attr); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2287 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2288 | device_unregister(dev); |
| 2289 | cpumask_clear_cpu(cpu, mce_device_initialized); |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2290 | per_cpu(mce_device, cpu) = NULL; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2291 | } |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2292 | |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2293 | /* Make sure there are no machine checks on offlined CPUs. */ |
Hidetoshi Seto | 767df1b | 2009-11-26 17:29:02 +0900 | [diff] [blame] | 2294 | static void __cpuinit mce_disable_cpu(void *h) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2295 | { |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2296 | unsigned long action = *(unsigned long *)h; |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2297 | int i; |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2298 | |
Tejun Heo | 7b543a5 | 2010-12-18 16:30:05 +0100 | [diff] [blame] | 2299 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2300 | return; |
Hidetoshi Seto | 767df1b | 2009-11-26 17:29:02 +0900 | [diff] [blame] | 2301 | |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2302 | if (!(action & CPU_TASKS_FROZEN)) |
| 2303 | cmci_clear(); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2304 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2305 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2306 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2307 | if (b->init) |
Andi Kleen | a2d32bc | 2009-07-09 00:31:44 +0200 | [diff] [blame] | 2308 | wrmsrl(MSR_IA32_MCx_CTL(i), 0); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 2309 | } |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2310 | } |
| 2311 | |
Hidetoshi Seto | 767df1b | 2009-11-26 17:29:02 +0900 | [diff] [blame] | 2312 | static void __cpuinit mce_reenable_cpu(void *h) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2313 | { |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2314 | unsigned long action = *(unsigned long *)h; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2315 | int i; |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2316 | |
Tejun Heo | 7b543a5 | 2010-12-18 16:30:05 +0100 | [diff] [blame] | 2317 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2318 | return; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2319 | |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2320 | if (!(action & CPU_TASKS_FROZEN)) |
| 2321 | cmci_reenable(); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2322 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2323 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2324 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2325 | if (b->init) |
Andi Kleen | a2d32bc | 2009-07-09 00:31:44 +0200 | [diff] [blame] | 2326 | wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 2327 | } |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2328 | } |
| 2329 | |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2330 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2331 | static int __cpuinit |
| 2332 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2333 | { |
| 2334 | unsigned int cpu = (unsigned long)hcpu; |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2335 | struct timer_list *t = &per_cpu(mce_timer, cpu); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2336 | |
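	/* mask off CPU_TASKS_FROZEN: suspend/resume transitions take the same paths */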
Thomas Gleixner | 1a65f97 | 2012-07-19 13:59:40 -0400 | [diff] [blame] | 2337 | switch (action & ~CPU_TASKS_FROZEN) { |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2338 | case CPU_ONLINE: |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2339 | mce_device_create(cpu); |
Rafael J. Wysocki | 8735728 | 2008-08-22 22:23:09 +0200 | [diff] [blame] | 2340 | if (threshold_cpu_callback) |
| 2341 | threshold_cpu_callback(action, cpu); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2342 | break; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2343 | case CPU_DEAD: |
Rafael J. Wysocki | 8735728 | 2008-08-22 22:23:09 +0200 | [diff] [blame] | 2344 | if (threshold_cpu_callback) |
| 2345 | threshold_cpu_callback(action, cpu); |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2346 | mce_device_remove(cpu); |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 2347 | mce_intel_hcpu_update(cpu); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2348 | break; |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2349 | case CPU_DOWN_PREPARE: |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2350 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 2351 | del_timer_sync(t); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2352 | break; |
| 2353 | case CPU_DOWN_FAILED: |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2354 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 2355 | mce_start_timer(cpu, t); |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2356 | break; |
Thomas Gleixner | 1a65f97 | 2012-07-19 13:59:40 -0400 | [diff] [blame] | 2357 | } |
| 2358 | |
| 2359 | if (action == CPU_POST_DEAD) { |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2360 | /* intentionally ignoring frozen here */ |
| 2361 | cmci_rediscover(cpu); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2362 | } |
Thomas Gleixner | 1a65f97 | 2012-07-19 13:59:40 -0400 | [diff] [blame] | 2363 | |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2364 | return NOTIFY_OK; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2365 | } |
| 2366 | |
Sam Ravnborg | 1e35669 | 2008-01-30 13:33:36 +0100 | [diff] [blame] | 2367 | static struct notifier_block mce_cpu_notifier __cpuinitdata = { |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2368 | .notifier_call = mce_cpu_callback, |
| 2369 | }; |
| 2370 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2371 | static __init void mce_init_banks(void) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2372 | { |
| 2373 | int i; |
| 2374 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2375 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2376 | struct mce_bank *b = &mce_banks[i]; |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2377 | struct device_attribute *a = &b->attr; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2378 | |
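		/*
		 * mce_banks[] is allocated at runtime, so these attributes
		 * need sysfs_attr_init() to get their lockdep keys before
		 * device_create_file() sees them.
		 */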
Eric W. Biederman | a07e415 | 2010-02-11 15:23:05 -0800 | [diff] [blame] | 2379 | sysfs_attr_init(&a->attr); |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2380 | a->attr.name = b->attrname; |
| 2381 | snprintf(b->attrname, ATTR_LEN, "bank%d", i); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2382 | |
| 2383 | a->attr.mode = 0644; |
| 2384 | a->show = show_bank; |
| 2385 | a->store = set_bank; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2386 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2387 | } |
| 2388 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2389 | static __init int mcheck_init_device(void) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2390 | { |
| 2391 | int err; |
| 2392 | int i = 0; |
| 2393 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2394 | if (!mce_available(&boot_cpu_data)) |
| 2395 | return -EIO; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2396 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2397 | zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); |
Rusty Russell | 996867d | 2009-03-13 14:49:51 +1030 | [diff] [blame] | 2398 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2399 | mce_init_banks(); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2400 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2401 | err = subsys_system_register(&mce_subsys, NULL); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2402 | if (err) |
| 2403 | return err; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2404 | |
| 2405 | for_each_online_cpu(i) { |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2406 | err = mce_device_create(i); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2407 | if (err) |
| 2408 | return err; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2409 | } |
| 2410 | |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2411 | register_syscore_ops(&mce_syscore_ops); |
Chandra Seetharaman | be6b5a3 | 2006-07-30 03:03:37 -0700 | [diff] [blame] | 2412 | register_hotcpu_notifier(&mce_cpu_notifier); |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 2413 | |
| 2414 | /* register character device /dev/mcelog */ |
| 2415 | err = misc_register(&mce_chrdev_device);
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2416 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2417 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2418 | } |
Liu, Jinsong | cef12ee | 2012-06-07 19:56:51 +0800 | [diff] [blame] | 2419 | device_initcall_sync(mcheck_init_device); |
Ingo Molnar | a988d33 | 2009-04-08 12:31:25 +0200 | [diff] [blame] | 2420 | |
Andi Kleen | d7c3c9a | 2009-04-28 23:07:25 +0200 | [diff] [blame] | 2421 | /* |
| 2422 | * Old style boot options parsing. Only for compatibility. |
| 2423 | */ |
| 2424 | static int __init mcheck_disable(char *str) |
| 2425 | { |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 2426 | mca_cfg.disabled = true; |
Andi Kleen | d7c3c9a | 2009-04-28 23:07:25 +0200 | [diff] [blame] | 2427 | return 1; |
| 2428 | } |
| 2429 | __setup("nomce", mcheck_disable); |
Huang Ying | 5be9ed2 | 2009-07-31 09:41:42 +0800 | [diff] [blame] | 2430 | |
| 2431 | #ifdef CONFIG_DEBUG_FS |
| 2432 | struct dentry *mce_get_debugfs_dir(void) |
| 2433 | { |
| 2434 | static struct dentry *dmce; |
| 2435 | |
| 2436 | if (!dmce) |
| 2437 | dmce = debugfs_create_dir("mce", NULL); |
| 2438 | |
| 2439 | return dmce; |
| 2440 | } |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2441 | |
| 2442 | static void mce_reset(void) |
| 2443 | { |
| 2444 | cpu_missing = 0; |
| 2445 | atomic_set(&mce_fake_paniced, 0); |
| 2446 | atomic_set(&mce_executing, 0); |
| 2447 | atomic_set(&mce_callin, 0); |
| 2448 | atomic_set(&global_nwo, 0); |
| 2449 | } |
| 2450 | |
| 2451 | static int fake_panic_get(void *data, u64 *val) |
| 2452 | { |
| 2453 | *val = fake_panic; |
| 2454 | return 0; |
| 2455 | } |
| 2456 | |
| 2457 | static int fake_panic_set(void *data, u64 val) |
| 2458 | { |
| 2459 | mce_reset(); |
| 2460 | fake_panic = val; |
| 2461 | return 0; |
| 2462 | } |
| 2463 | |
| 2464 | DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, |
| 2465 | fake_panic_set, "%llu\n"); |
| 2466 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2467 | static int __init mcheck_debugfs_init(void) |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2468 | { |
| 2469 | struct dentry *dmce, *ffake_panic; |
| 2470 | |
| 2471 | dmce = mce_get_debugfs_dir(); |
| 2472 | if (!dmce) |
| 2473 | return -ENOMEM; |
| 2474 | ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL, |
| 2475 | &fake_panic_fops); |
| 2476 | if (!ffake_panic) |
| 2477 | return -ENOMEM; |
| 2478 | |
| 2479 | return 0; |
| 2480 | } |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2481 | late_initcall(mcheck_debugfs_init); |
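/*
 * Typical use of the knob above, assuming debugfs is mounted at the usual
 * /sys/kernel/debug:
 *
 *   # cat /sys/kernel/debug/mce/fake_panic
 *   0
 *   # echo 1 > /sys/kernel/debug/mce/fake_panic
 *
 * With fake_panic set, mce_panic() prints the would-be panic message and
 * returns instead of panicking; fake_panic_set() also clears the MCE
 * synchronization counters via mce_reset(), keeping injection tests
 * repeatable.
 */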
Huang Ying | 5be9ed2 | 2009-07-31 09:41:42 +0800 | [diff] [blame] | 2482 | #endif |