Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Machine check handler. |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 3 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 5 | * Rest from unknown author(s). |
| 6 | * 2004 Andi Kleen. Rewrote most of it. |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 7 | * Copyright 2008 Intel Corporation |
| 8 | * Author: Andi Kleen |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | */ |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 10 | |
| 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 12 | |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 13 | #include <linux/thread_info.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 14 | #include <linux/capability.h> |
| 15 | #include <linux/miscdevice.h> |
Andi Kleen | 8457c84 | 2009-02-12 13:49:33 +0100 | [diff] [blame] | 16 | #include <linux/ratelimit.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 17 | #include <linux/rcupdate.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 18 | #include <linux/kobject.h> |
Hidetoshi Seto | 14a0253 | 2009-04-30 16:04:51 +0900 | [diff] [blame] | 19 | #include <linux/uaccess.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 20 | #include <linux/kdebug.h> |
| 21 | #include <linux/kernel.h> |
| 22 | #include <linux/percpu.h> |
| 23 | #include <linux/string.h> |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 24 | #include <linux/device.h> |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 25 | #include <linux/syscore_ops.h> |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 26 | #include <linux/delay.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 27 | #include <linux/ctype.h> |
| 28 | #include <linux/sched.h> |
| 29 | #include <linux/sysfs.h> |
| 30 | #include <linux/types.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 31 | #include <linux/slab.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 32 | #include <linux/init.h> |
| 33 | #include <linux/kmod.h> |
| 34 | #include <linux/poll.h> |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 35 | #include <linux/nmi.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 36 | #include <linux/cpu.h> |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 37 | #include <linux/ras.h> |
Hidetoshi Seto | 14a0253 | 2009-04-30 16:04:51 +0900 | [diff] [blame] | 38 | #include <linux/smp.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 39 | #include <linux/fs.h> |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 40 | #include <linux/mm.h> |
Huang Ying | 5be9ed2 | 2009-07-31 09:41:42 +0800 | [diff] [blame] | 41 | #include <linux/debugfs.h> |
Hidetoshi Seto | b77e70b | 2011-06-08 10:56:02 +0900 | [diff] [blame] | 42 | #include <linux/irq_work.h> |
Paul Gortmaker | 69c60c8 | 2011-05-26 12:22:53 -0400 | [diff] [blame] | 43 | #include <linux/export.h> |
Tony Luck | 3637efb | 2016-09-01 11:39:33 -0700 | [diff] [blame] | 44 | #include <linux/jump_label.h> |
Dan Williams | 284ce40 | 2018-07-13 21:50:32 -0700 | [diff] [blame] | 45 | #include <linux/set_memory.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 46 | |
Tony Luck | 3f5a789 | 2016-11-18 09:48:36 -0800 | [diff] [blame] | 47 | #include <asm/intel-family.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 48 | #include <asm/processor.h> |
Andy Lutomirski | 9592747 | 2014-11-19 17:41:09 -0800 | [diff] [blame] | 49 | #include <asm/traps.h> |
Andy Lutomirski | 375074c | 2014-10-24 15:58:07 -0700 | [diff] [blame] | 50 | #include <asm/tlbflush.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 51 | #include <asm/mce.h> |
| 52 | #include <asm/msr.h> |
Xunlei Pang | 5bc3295 | 2017-03-13 10:50:19 +0100 | [diff] [blame] | 53 | #include <asm/reboot.h> |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 54 | |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 55 | #include "mce-internal.h" |
Ingo Molnar | 711c2e4 | 2009-04-08 12:31:26 +0200 | [diff] [blame] | 56 | |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 57 | static DEFINE_MUTEX(mce_log_mutex); |
Paul E. McKenney | f56e8a0 | 2010-03-05 15:03:27 -0800 | [diff] [blame] | 58 | |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 59 | /* sysfs synchronization */ |
| 60 | static DEFINE_MUTEX(mce_sysfs_mutex); |
| 61 | |
Hidetoshi Seto | 8968f9d | 2009-10-13 16:19:41 +0900 | [diff] [blame] | 62 | #define CREATE_TRACE_POINTS |
| 63 | #include <trace/events/mce.h> |
| 64 | |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 65 | #define SPINUNIT 100 /* 100ns */ |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 66 | |
Andi Kleen | 01ca79f | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 67 | DEFINE_PER_CPU(unsigned, mce_exception_count); |
| 68 | |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 69 | struct mce_bank *mce_banks __read_mostly; |
Aravind Gopalakrishnan | bf80bbd | 2015-03-23 10:42:52 -0500 | [diff] [blame] | 70 | struct mce_vendor_flags mce_flags __read_mostly; |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 71 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 72 | struct mca_config mca_cfg __read_mostly = { |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 73 | .bootlog = -1, |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 74 | /* |
| 75 | * Tolerant levels: |
| 76 | * 0: always panic on uncorrected errors, log corrected errors |
| 77 | * 1: panic or SIGBUS on uncorrected errors, log corrected errors |
| 78 | * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors |
| 79 | * 3: never panic or SIGBUS, log all errors (for testing only) |
| 80 | */ |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 81 | .tolerant = 1, |
| 82 | .monarch_timeout = -1 |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 83 | }; |
| 84 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 85 | static DEFINE_PER_CPU(struct mce, mces_seen); |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 86 | static unsigned long mce_need_notify; |
| 87 | static int cpu_missing; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 88 | |
Naveen N. Rao | 0644414 | 2013-06-25 23:58:59 +0530 | [diff] [blame] | 89 | /* |
| 90 | * MCA banks polled by the period polling timer for corrected events. |
| 91 | * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). |
| 92 | */ |
Andi Kleen | ee031c3 | 2009-02-12 13:49:34 +0100 | [diff] [blame] | 93 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { |
| 94 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL |
| 95 | }; |
| 96 | |
Naveen N. Rao | c3d1fb5 | 2013-07-01 21:08:47 +0530 | [diff] [blame] | 97 | /* |
| 98 | * MCA banks controlled through firmware first for corrected errors. |
| 99 | * This is a global list of banks for which we won't enable CMCI and we |
| 100 | * won't poll. Firmware controls these banks and is responsible for |
| 101 | * reporting corrected errors through GHES. Uncorrected/recoverable |
| 102 | * errors are still notified through a machine check. |
| 103 | */ |
| 104 | mce_banks_t mce_banks_ce_disabled; |
| 105 | |
Chen, Gong | 061120a | 2015-08-12 18:29:35 +0200 | [diff] [blame] | 106 | static struct work_struct mce_work; |
| 107 | static struct irq_work mce_irq_work; |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 108 | |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 109 | static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); |
| 110 | |
Borislav Petkov | 3653ada | 2011-12-04 15:12:09 +0100 | [diff] [blame] | 111 | /* |
| 112 | * CPU/chipset specific EDAC code can register a notifier call here to print |
| 113 | * MCE errors in a human-readable form. |
| 114 | */ |
Vishal Verma | 0dc9c63 | 2017-04-18 20:42:35 +0200 | [diff] [blame] | 115 | BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain); |
Borislav Petkov | 3653ada | 2011-12-04 15:12:09 +0100 | [diff] [blame] | 116 | |
Andi Kleen | b5f2fa4 | 2009-02-12 13:43:22 +0100 | [diff] [blame] | 117 | /* Do initial initialization of a struct mce */ |
| 118 | void mce_setup(struct mce *m) |
| 119 | { |
| 120 | memset(m, 0, sizeof(struct mce)); |
Andi Kleen | d620c67 | 2009-05-27 21:56:56 +0200 | [diff] [blame] | 121 | m->cpu = m->extcpu = smp_processor_id(); |
Arnd Bergmann | bc39f01 | 2018-06-22 11:54:22 +0200 | [diff] [blame] | 122 | /* need the internal __ version to avoid deadlocks */ |
| 123 | m->time = __ktime_get_real_seconds(); |
Andi Kleen | 8ee0834 | 2009-05-27 21:56:56 +0200 | [diff] [blame] | 124 | m->cpuvendor = boot_cpu_data.x86_vendor; |
| 125 | m->cpuid = cpuid_eax(1); |
Andi Kleen | 8ee0834 | 2009-05-27 21:56:56 +0200 | [diff] [blame] | 126 | m->socketid = cpu_data(m->extcpu).phys_proc_id; |
Andi Kleen | 8ee0834 | 2009-05-27 21:56:56 +0200 | [diff] [blame] | 127 | m->apicid = cpu_data(m->extcpu).initial_apicid; |
| 128 | rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); |
Tony Luck | 3f5a789 | 2016-11-18 09:48:36 -0800 | [diff] [blame] | 129 | |
| 130 | if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) |
| 131 | rdmsrl(MSR_PPIN, m->ppin); |
Tony Luck | fa94d0c | 2018-03-06 15:21:41 +0100 | [diff] [blame] | 132 | |
| 133 | m->microcode = boot_cpu_data.microcode; |
Andi Kleen | b5f2fa4 | 2009-02-12 13:43:22 +0100 | [diff] [blame] | 134 | } |
| 135 | |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 136 | DEFINE_PER_CPU(struct mce, injectm); |
| 137 | EXPORT_PER_CPU_SYMBOL_GPL(injectm); |
| 138 | |
Borislav Petkov | fe3ed20 | 2017-03-27 11:33:00 +0200 | [diff] [blame] | 139 | void mce_log(struct mce *m) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 140 | { |
Borislav Petkov | fe3ed20 | 2017-03-27 11:33:00 +0200 | [diff] [blame] | 141 | if (!mce_gen_pool_add(m)) |
Chen, Gong | f29a7af | 2015-08-12 18:29:37 +0200 | [diff] [blame] | 142 | irq_work_queue(&mce_irq_work); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 143 | } |
| 144 | |
Borislav Petkov | a79da38 | 2015-08-12 18:29:44 +0200 | [diff] [blame] | 145 | void mce_inject_log(struct mce *m) |
Borislav Petkov | 0937195 | 2011-12-08 12:28:33 +0100 | [diff] [blame] | 146 | { |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 147 | mutex_lock(&mce_log_mutex); |
Borislav Petkov | a79da38 | 2015-08-12 18:29:44 +0200 | [diff] [blame] | 148 | mce_log(m); |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 149 | mutex_unlock(&mce_log_mutex); |
Borislav Petkov | 0937195 | 2011-12-08 12:28:33 +0100 | [diff] [blame] | 150 | } |
Borislav Petkov | a79da38 | 2015-08-12 18:29:44 +0200 | [diff] [blame] | 151 | EXPORT_SYMBOL_GPL(mce_inject_log); |
Borislav Petkov | 0937195 | 2011-12-08 12:28:33 +0100 | [diff] [blame] | 152 | |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 153 | static struct notifier_block mce_srao_nb; |
Borislav Petkov | 0937195 | 2011-12-08 12:28:33 +0100 | [diff] [blame] | 154 | |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 155 | /* |
| 156 | * We run the default notifier if we have only the SRAO, the first and the |
| 157 | * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS |
| 158 | * notifiers registered on the chain. |
| 159 | */ |
| 160 | #define NUM_DEFAULT_NOTIFIERS 3 |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 161 | static atomic_t num_notifiers; |
| 162 | |
Borislav Petkov | 3653ada | 2011-12-04 15:12:09 +0100 | [diff] [blame] | 163 | void mce_register_decode_chain(struct notifier_block *nb) |
| 164 | { |
Borislav Petkov | 415601b | 2017-04-18 09:33:28 +0200 | [diff] [blame] | 165 | if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC)) |
Borislav Petkov | 32b40a8 | 2017-03-27 11:33:04 +0200 | [diff] [blame] | 166 | return; |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 167 | |
Borislav Petkov | 32b40a8 | 2017-03-27 11:33:04 +0200 | [diff] [blame] | 168 | atomic_inc(&num_notifiers); |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 169 | |
Vishal Verma | 0dc9c63 | 2017-04-18 20:42:35 +0200 | [diff] [blame] | 170 | blocking_notifier_chain_register(&x86_mce_decoder_chain, nb); |
Borislav Petkov | 3653ada | 2011-12-04 15:12:09 +0100 | [diff] [blame] | 171 | } |
| 172 | EXPORT_SYMBOL_GPL(mce_register_decode_chain); |
| 173 | |
| 174 | void mce_unregister_decode_chain(struct notifier_block *nb) |
| 175 | { |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 176 | atomic_dec(&num_notifiers); |
| 177 | |
Vishal Verma | 0dc9c63 | 2017-04-18 20:42:35 +0200 | [diff] [blame] | 178 | blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb); |
Borislav Petkov | 3653ada | 2011-12-04 15:12:09 +0100 | [diff] [blame] | 179 | } |
| 180 | EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); |
| 181 | |
Yazen Ghannam | a9750a3 | 2016-04-30 14:33:54 +0200 | [diff] [blame] | 182 | static inline u32 ctl_reg(int bank) |
| 183 | { |
| 184 | return MSR_IA32_MCx_CTL(bank); |
| 185 | } |
| 186 | |
| 187 | static inline u32 status_reg(int bank) |
| 188 | { |
| 189 | return MSR_IA32_MCx_STATUS(bank); |
| 190 | } |
| 191 | |
| 192 | static inline u32 addr_reg(int bank) |
| 193 | { |
| 194 | return MSR_IA32_MCx_ADDR(bank); |
| 195 | } |
| 196 | |
| 197 | static inline u32 misc_reg(int bank) |
| 198 | { |
| 199 | return MSR_IA32_MCx_MISC(bank); |
| 200 | } |
| 201 | |
| 202 | static inline u32 smca_ctl_reg(int bank) |
| 203 | { |
| 204 | return MSR_AMD64_SMCA_MCx_CTL(bank); |
| 205 | } |
| 206 | |
| 207 | static inline u32 smca_status_reg(int bank) |
| 208 | { |
| 209 | return MSR_AMD64_SMCA_MCx_STATUS(bank); |
| 210 | } |
| 211 | |
| 212 | static inline u32 smca_addr_reg(int bank) |
| 213 | { |
| 214 | return MSR_AMD64_SMCA_MCx_ADDR(bank); |
| 215 | } |
| 216 | |
| 217 | static inline u32 smca_misc_reg(int bank) |
| 218 | { |
| 219 | return MSR_AMD64_SMCA_MCx_MISC(bank); |
| 220 | } |
| 221 | |
| 222 | struct mca_msr_regs msr_ops = { |
| 223 | .ctl = ctl_reg, |
| 224 | .status = status_reg, |
| 225 | .addr = addr_reg, |
| 226 | .misc = misc_reg |
| 227 | }; |
| 228 | |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 229 | static void __print_mce(struct mce *m) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 230 | { |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 231 | pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n", |
| 232 | m->extcpu, |
| 233 | (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""), |
| 234 | m->mcgstatus, m->bank, m->status); |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 235 | |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 236 | if (m->ip) { |
Huang Ying | a2d7b0d | 2010-06-08 14:35:39 +0800 | [diff] [blame] | 237 | pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ", |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 238 | !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 239 | m->cs, m->ip); |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 240 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 241 | if (m->cs == __KERNEL_CS) |
Borislav Petkov | c80c5ec | 2018-02-10 15:53:14 +0100 | [diff] [blame] | 242 | pr_cont("{%pS}", (void *)(unsigned long)m->ip); |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 243 | pr_cont("\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 244 | } |
Borislav Petkov | 549d042 | 2009-07-24 13:51:42 +0200 | [diff] [blame] | 245 | |
Huang Ying | a2d7b0d | 2010-06-08 14:35:39 +0800 | [diff] [blame] | 246 | pr_emerg(HW_ERR "TSC %llx ", m->tsc); |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 247 | if (m->addr) |
| 248 | pr_cont("ADDR %llx ", m->addr); |
| 249 | if (m->misc) |
| 250 | pr_cont("MISC %llx ", m->misc); |
| 251 | |
Yazen Ghannam | 4b711f9 | 2016-09-12 09:59:38 +0200 | [diff] [blame] | 252 | if (mce_flags.smca) { |
| 253 | if (m->synd) |
| 254 | pr_cont("SYND %llx ", m->synd); |
| 255 | if (m->ipid) |
| 256 | pr_cont("IPID %llx ", m->ipid); |
| 257 | } |
| 258 | |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 259 | pr_cont("\n"); |
Andi Kleen | 506ed6b | 2011-10-12 17:46:33 -0700 | [diff] [blame] | 260 | /* |
| 261 | * Note this output is parsed by external tools and old fields |
| 262 | * should not be changed. |
| 263 | */ |
Borislav Petkov | 881e23e | 2011-10-17 16:45:10 +0200 | [diff] [blame] | 264 | pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", |
Andi Kleen | 506ed6b | 2011-10-12 17:46:33 -0700 | [diff] [blame] | 265 | m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, |
Tony Luck | fa94d0c | 2018-03-06 15:21:41 +0100 | [diff] [blame] | 266 | m->microcode); |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 267 | } |
| 268 | |
| 269 | static void print_mce(struct mce *m) |
| 270 | { |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 271 | __print_mce(m); |
Borislav Petkov | b2fbf6f | 2018-02-21 11:18:55 +0100 | [diff] [blame] | 272 | |
Pu Wen | ac78bd7 | 2018-09-23 17:36:04 +0800 | [diff] [blame] | 273 | if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON) |
Borislav Petkov | b2fbf6f | 2018-02-21 11:18:55 +0100 | [diff] [blame] | 274 | pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); |
Andi Kleen | 8650356 | 2009-05-27 21:56:58 +0200 | [diff] [blame] | 275 | } |
| 276 | |
Andi Kleen | f94b61c | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 277 | #define PANIC_TIMEOUT 5 /* 5 seconds */ |
| 278 | |
Borislav Petkov | c7c9b39 | 2014-12-03 22:36:45 +0100 | [diff] [blame] | 279 | static atomic_t mce_panicked; |
Andi Kleen | f94b61c | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 280 | |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 281 | static int fake_panic; |
Borislav Petkov | c7c9b39 | 2014-12-03 22:36:45 +0100 | [diff] [blame] | 282 | static atomic_t mce_fake_panicked; |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 283 | |
Andi Kleen | f94b61c | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 284 | /* Panic in progress. Enable interrupts and wait for final IPI */ |
| 285 | static void wait_for_panic(void) |
| 286 | { |
| 287 | long timeout = PANIC_TIMEOUT*USEC_PER_SEC; |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 288 | |
Andi Kleen | f94b61c | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 289 | preempt_disable(); |
| 290 | local_irq_enable(); |
| 291 | while (timeout-- > 0) |
| 292 | udelay(1); |
Andi Kleen | 29b0f59 | 2009-05-27 21:56:56 +0200 | [diff] [blame] | 293 | if (panic_timeout == 0) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 294 | panic_timeout = mca_cfg.panic_timeout; |
Andi Kleen | f94b61c | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 295 | panic("Panicing machine check CPU died"); |
| 296 | } |
| 297 | |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 298 | static void mce_panic(const char *msg, struct mce *final, char *exp) |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 299 | { |
Tony Luck | 5541c93 | 2016-04-30 14:33:56 +0200 | [diff] [blame] | 300 | int apei_err = 0; |
| 301 | struct llist_node *pending; |
| 302 | struct mce_evt_llist *l; |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 303 | |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 304 | if (!fake_panic) { |
| 305 | /* |
| 306 | * Make sure only one CPU runs in machine check panic |
| 307 | */ |
Borislav Petkov | c7c9b39 | 2014-12-03 22:36:45 +0100 | [diff] [blame] | 308 | if (atomic_inc_return(&mce_panicked) > 1) |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 309 | wait_for_panic(); |
| 310 | barrier(); |
Andi Kleen | f94b61c | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 311 | |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 312 | bust_spinlocks(1); |
| 313 | console_verbose(); |
| 314 | } else { |
| 315 | /* Don't log too much for fake panic */ |
Borislav Petkov | c7c9b39 | 2014-12-03 22:36:45 +0100 | [diff] [blame] | 316 | if (atomic_inc_return(&mce_fake_panicked) > 1) |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 317 | return; |
| 318 | } |
Tony Luck | 5541c93 | 2016-04-30 14:33:56 +0200 | [diff] [blame] | 319 | pending = mce_gen_pool_prepare_records(); |
Andi Kleen | a0189c7 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 320 | /* First print corrected ones that are still unlogged */ |
Tony Luck | 5541c93 | 2016-04-30 14:33:56 +0200 | [diff] [blame] | 321 | llist_for_each_entry(l, pending, llnode) { |
| 322 | struct mce *m = &l->mce; |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 323 | if (!(m->status & MCI_STATUS_UC)) { |
Hidetoshi Seto | 77e26cc | 2009-06-11 16:04:35 +0900 | [diff] [blame] | 324 | print_mce(m); |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 325 | if (!apei_err) |
| 326 | apei_err = apei_write_mce(m); |
| 327 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 328 | } |
Andi Kleen | a0189c7 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 329 | /* Now print uncorrected but with the final one last */ |
Tony Luck | 5541c93 | 2016-04-30 14:33:56 +0200 | [diff] [blame] | 330 | llist_for_each_entry(l, pending, llnode) { |
| 331 | struct mce *m = &l->mce; |
Hidetoshi Seto | 77e26cc | 2009-06-11 16:04:35 +0900 | [diff] [blame] | 332 | if (!(m->status & MCI_STATUS_UC)) |
| 333 | continue; |
Tony Luck | 5541c93 | 2016-04-30 14:33:56 +0200 | [diff] [blame] | 334 | if (!final || mce_cmp(m, final)) { |
Hidetoshi Seto | 77e26cc | 2009-06-11 16:04:35 +0900 | [diff] [blame] | 335 | print_mce(m); |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 336 | if (!apei_err) |
| 337 | apei_err = apei_write_mce(m); |
| 338 | } |
Andi Kleen | a0189c7 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 339 | } |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 340 | if (final) { |
Hidetoshi Seto | 77e26cc | 2009-06-11 16:04:35 +0900 | [diff] [blame] | 341 | print_mce(final); |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 342 | if (!apei_err) |
| 343 | apei_err = apei_write_mce(final); |
| 344 | } |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 345 | if (cpu_missing) |
Huang Ying | a2d7b0d | 2010-06-08 14:35:39 +0800 | [diff] [blame] | 346 | pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n"); |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 347 | if (exp) |
Huang Ying | a2d7b0d | 2010-06-08 14:35:39 +0800 | [diff] [blame] | 348 | pr_emerg(HW_ERR "Machine check: %s\n", exp); |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 349 | if (!fake_panic) { |
| 350 | if (panic_timeout == 0) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 351 | panic_timeout = mca_cfg.panic_timeout; |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 352 | panic(msg); |
| 353 | } else |
Huang Ying | a2d7b0d | 2010-06-08 14:35:39 +0800 | [diff] [blame] | 354 | pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 355 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 356 | |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 357 | /* Support code for software error injection */ |
| 358 | |
| 359 | static int msr_to_offset(u32 msr) |
| 360 | { |
Tejun Heo | 0a3aee0 | 2010-12-18 16:28:55 +0100 | [diff] [blame] | 361 | unsigned bank = __this_cpu_read(injectm.bank); |
Ingo Molnar | f436f8b | 2009-10-01 16:14:32 +0200 | [diff] [blame] | 362 | |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 363 | if (msr == mca_cfg.rip_msr) |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 364 | return offsetof(struct mce, ip); |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 365 | if (msr == msr_ops.status(bank)) |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 366 | return offsetof(struct mce, status); |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 367 | if (msr == msr_ops.addr(bank)) |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 368 | return offsetof(struct mce, addr); |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 369 | if (msr == msr_ops.misc(bank)) |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 370 | return offsetof(struct mce, misc); |
| 371 | if (msr == MSR_IA32_MCG_STATUS) |
| 372 | return offsetof(struct mce, mcgstatus); |
| 373 | return -1; |
| 374 | } |
| 375 | |
Andi Kleen | 5f8c1a5 | 2009-04-29 19:29:12 +0200 | [diff] [blame] | 376 | /* MSR access wrappers used for error injection */ |
| 377 | static u64 mce_rdmsrl(u32 msr) |
| 378 | { |
| 379 | u64 v; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 380 | |
Tejun Heo | 0a3aee0 | 2010-12-18 16:28:55 +0100 | [diff] [blame] | 381 | if (__this_cpu_read(injectm.finished)) { |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 382 | int offset = msr_to_offset(msr); |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 383 | |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 384 | if (offset < 0) |
| 385 | return 0; |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 386 | return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 387 | } |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 388 | |
| 389 | if (rdmsrl_safe(msr, &v)) { |
Borislav Petkov | 38c54cc | 2016-07-08 11:09:41 +0200 | [diff] [blame] | 390 | WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr); |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 391 | /* |
| 392 | * Return zero in case the access faulted. This should |
| 393 | * not happen normally but can happen if the CPU does |
| 394 | * something weird, or if the code is buggy. |
| 395 | */ |
| 396 | v = 0; |
| 397 | } |
| 398 | |
Andi Kleen | 5f8c1a5 | 2009-04-29 19:29:12 +0200 | [diff] [blame] | 399 | return v; |
| 400 | } |
| 401 | |
| 402 | static void mce_wrmsrl(u32 msr, u64 v) |
| 403 | { |
Tejun Heo | 0a3aee0 | 2010-12-18 16:28:55 +0100 | [diff] [blame] | 404 | if (__this_cpu_read(injectm.finished)) { |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 405 | int offset = msr_to_offset(msr); |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 406 | |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 407 | if (offset >= 0) |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 408 | *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 409 | return; |
| 410 | } |
Andi Kleen | 5f8c1a5 | 2009-04-29 19:29:12 +0200 | [diff] [blame] | 411 | wrmsrl(msr, v); |
| 412 | } |
| 413 | |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 414 | /* |
Hidetoshi Seto | b8325c5 | 2011-06-08 10:57:46 +0900 | [diff] [blame] | 415 | * Collect all global (w.r.t. this processor) status about this machine |
| 416 | * check into our "mce" struct so that we can use it later to assess |
| 417 | * the severity of the problem as we read per-bank specific details. |
| 418 | */ |
| 419 | static inline void mce_gather_info(struct mce *m, struct pt_regs *regs) |
| 420 | { |
| 421 | mce_setup(m); |
| 422 | |
| 423 | m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); |
| 424 | if (regs) { |
| 425 | /* |
| 426 | * Get the address of the instruction at the time of |
| 427 | * the machine check error. |
| 428 | */ |
| 429 | if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) { |
| 430 | m->ip = regs->ip; |
| 431 | m->cs = regs->cs; |
Andi Kleen | a129a7c | 2010-11-19 13:16:22 +0100 | [diff] [blame] | 432 | |
| 433 | /* |
| 434 | * When in VM86 mode make the cs look like ring 3 |
| 435 | * always. This is a lie, but it's better than passing |
| 436 | * the additional vm86 bit around everywhere. |
| 437 | */ |
| 438 | if (v8086_mode(regs)) |
| 439 | m->cs |= 3; |
Hidetoshi Seto | b8325c5 | 2011-06-08 10:57:46 +0900 | [diff] [blame] | 440 | } |
| 441 | /* Use accurate RIP reporting if available. */ |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 442 | if (mca_cfg.rip_msr) |
| 443 | m->ip = mce_rdmsrl(mca_cfg.rip_msr); |
Hidetoshi Seto | b8325c5 | 2011-06-08 10:57:46 +0900 | [diff] [blame] | 444 | } |
| 445 | } |
| 446 | |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 447 | int mce_available(struct cpuinfo_x86 *c) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 448 | { |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 449 | if (mca_cfg.disabled) |
Andi Kleen | 5b4408f | 2009-02-12 13:39:30 +0100 | [diff] [blame] | 450 | return 0; |
Akinobu Mita | 3d1712c | 2006-03-24 03:15:11 -0800 | [diff] [blame] | 451 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 452 | } |
| 453 | |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 454 | static void mce_schedule_work(void) |
| 455 | { |
Tejun Heo | a2c2727 | 2016-09-16 15:49:32 -0400 | [diff] [blame] | 456 | if (!mce_gen_pool_empty()) |
Chen, Gong | 061120a | 2015-08-12 18:29:35 +0200 | [diff] [blame] | 457 | schedule_work(&mce_work); |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 458 | } |
| 459 | |
Hidetoshi Seto | b77e70b | 2011-06-08 10:56:02 +0900 | [diff] [blame] | 460 | static void mce_irq_work_cb(struct irq_work *entry) |
Andi Kleen | ccc3c31 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 461 | { |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 462 | mce_schedule_work(); |
Andi Kleen | ccc3c31 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 463 | } |
Andi Kleen | ccc3c31 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 464 | |
| 465 | static void mce_report_event(struct pt_regs *regs) |
| 466 | { |
| 467 | if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) { |
Andi Kleen | 9ff36ee | 2009-05-27 21:56:58 +0200 | [diff] [blame] | 468 | mce_notify_irq(); |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 469 | /* |
| 470 | * Triggering the work queue here is just an insurance |
| 471 | * policy in case the syscall exit notify handler |
| 472 | * doesn't run soon enough or ends up running on the |
| 473 | * wrong CPU (can happen when audit sleeps) |
| 474 | */ |
| 475 | mce_schedule_work(); |
Andi Kleen | ccc3c31 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 476 | return; |
| 477 | } |
| 478 | |
Chen, Gong | 061120a | 2015-08-12 18:29:35 +0200 | [diff] [blame] | 479 | irq_work_queue(&mce_irq_work); |
Andi Kleen | ccc3c31 | 2009-05-27 21:56:54 +0200 | [diff] [blame] | 480 | } |
| 481 | |
Borislav Petkov | feab21f | 2015-11-24 08:41:20 +0100 | [diff] [blame] | 482 | /* |
| 483 | * Check if the address reported by the CPU is in a format we can parse. |
| 484 | * It would be possible to add code for most other cases, but all would |
| 485 | * be somewhat complicated (e.g. segment offset would require an instruction |
| 486 | * parser). So only support physical addresses up to page granuality for now. |
| 487 | */ |
Vishal Verma | e8a308e | 2018-10-25 18:37:29 -0600 | [diff] [blame] | 488 | int mce_usable_address(struct mce *m) |
Borislav Petkov | feab21f | 2015-11-24 08:41:20 +0100 | [diff] [blame] | 489 | { |
Borislav Petkov | c6a9583 | 2017-04-18 20:39:24 +0200 | [diff] [blame] | 490 | if (!(m->status & MCI_STATUS_ADDRV)) |
Borislav Petkov | feab21f | 2015-11-24 08:41:20 +0100 | [diff] [blame] | 491 | return 0; |
| 492 | |
| 493 | /* Checks after this one are Intel-specific: */ |
| 494 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
| 495 | return 1; |
| 496 | |
Borislav Petkov | c6a9583 | 2017-04-18 20:39:24 +0200 | [diff] [blame] | 497 | if (!(m->status & MCI_STATUS_MISCV)) |
| 498 | return 0; |
| 499 | |
Borislav Petkov | feab21f | 2015-11-24 08:41:20 +0100 | [diff] [blame] | 500 | if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT) |
| 501 | return 0; |
Borislav Petkov | c6a9583 | 2017-04-18 20:39:24 +0200 | [diff] [blame] | 502 | |
Borislav Petkov | feab21f | 2015-11-24 08:41:20 +0100 | [diff] [blame] | 503 | if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS) |
| 504 | return 0; |
Borislav Petkov | c6a9583 | 2017-04-18 20:39:24 +0200 | [diff] [blame] | 505 | |
Borislav Petkov | feab21f | 2015-11-24 08:41:20 +0100 | [diff] [blame] | 506 | return 1; |
| 507 | } |
Vishal Verma | e8a308e | 2018-10-25 18:37:29 -0600 | [diff] [blame] | 508 | EXPORT_SYMBOL_GPL(mce_usable_address); |
Borislav Petkov | feab21f | 2015-11-24 08:41:20 +0100 | [diff] [blame] | 509 | |
Borislav Petkov | 2d1f406 | 2017-05-19 11:39:09 +0200 | [diff] [blame] | 510 | bool mce_is_memory_error(struct mce *m) |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 511 | { |
Pu Wen | ac78bd7 | 2018-09-23 17:36:04 +0800 | [diff] [blame] | 512 | if (m->cpuvendor == X86_VENDOR_AMD || |
| 513 | m->cpuvendor == X86_VENDOR_HYGON) { |
Yazen Ghannam | c6708d5 | 2017-12-18 12:37:13 +0100 | [diff] [blame] | 514 | return amd_mce_is_memory_error(m); |
Borislav Petkov | 2d1f406 | 2017-05-19 11:39:09 +0200 | [diff] [blame] | 515 | } else if (m->cpuvendor == X86_VENDOR_INTEL) { |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 516 | /* |
| 517 | * Intel SDM Volume 3B - 15.9.2 Compound Error Codes |
| 518 | * |
| 519 | * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for |
| 520 | * indicating a memory error. Bit 8 is used for indicating a |
| 521 | * cache hierarchy error. The combination of bit 2 and bit 3 |
| 522 | * is used for indicating a `generic' cache hierarchy error |
| 523 | * But we can't just blindly check the above bits, because if |
| 524 | * bit 11 is set, then it is a bus/interconnect error - and |
| 525 | * either way the above bits just gives more detail on what |
| 526 | * bus/interconnect error happened. Note that bit 12 can be |
| 527 | * ignored, as it's the "filter" bit. |
| 528 | */ |
| 529 | return (m->status & 0xef80) == BIT(7) || |
| 530 | (m->status & 0xef00) == BIT(8) || |
| 531 | (m->status & 0xeffc) == 0xc; |
| 532 | } |
| 533 | |
| 534 | return false; |
| 535 | } |
Borislav Petkov | 2d1f406 | 2017-05-19 11:39:09 +0200 | [diff] [blame] | 536 | EXPORT_SYMBOL_GPL(mce_is_memory_error); |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 537 | |
Vishal Verma | 5d96c93 | 2018-10-25 18:37:28 -0600 | [diff] [blame] | 538 | bool mce_is_correctable(struct mce *m) |
Yazen Ghannam | 179eb85 | 2017-12-18 12:37:14 +0100 | [diff] [blame] | 539 | { |
| 540 | if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) |
| 541 | return false; |
| 542 | |
Pu Wen | ac78bd7 | 2018-09-23 17:36:04 +0800 | [diff] [blame] | 543 | if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED) |
| 544 | return false; |
| 545 | |
Yazen Ghannam | 179eb85 | 2017-12-18 12:37:14 +0100 | [diff] [blame] | 546 | if (m->status & MCI_STATUS_UC) |
| 547 | return false; |
| 548 | |
| 549 | return true; |
| 550 | } |
Vishal Verma | 5d96c93 | 2018-10-25 18:37:28 -0600 | [diff] [blame] | 551 | EXPORT_SYMBOL_GPL(mce_is_correctable); |
Yazen Ghannam | 179eb85 | 2017-12-18 12:37:14 +0100 | [diff] [blame] | 552 | |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 553 | static bool cec_add_mce(struct mce *m) |
| 554 | { |
| 555 | if (!m) |
| 556 | return false; |
| 557 | |
| 558 | /* We eat only correctable DRAM errors with usable addresses. */ |
Borislav Petkov | 2d1f406 | 2017-05-19 11:39:09 +0200 | [diff] [blame] | 559 | if (mce_is_memory_error(m) && |
Yazen Ghannam | 179eb85 | 2017-12-18 12:37:14 +0100 | [diff] [blame] | 560 | mce_is_correctable(m) && |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 561 | mce_usable_address(m)) |
| 562 | if (!cec_add_elem(m->addr >> PAGE_SHIFT)) |
| 563 | return true; |
| 564 | |
| 565 | return false; |
| 566 | } |
| 567 | |
| 568 | static int mce_first_notifier(struct notifier_block *nb, unsigned long val, |
| 569 | void *data) |
| 570 | { |
| 571 | struct mce *m = (struct mce *)data; |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 572 | |
| 573 | if (!m) |
| 574 | return NOTIFY_DONE; |
| 575 | |
| 576 | if (cec_add_mce(m)) |
| 577 | return NOTIFY_STOP; |
| 578 | |
| 579 | /* Emit the trace record: */ |
| 580 | trace_mce_record(m); |
| 581 | |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 582 | set_bit(0, &mce_need_notify); |
| 583 | |
| 584 | mce_notify_irq(); |
| 585 | |
| 586 | return NOTIFY_DONE; |
| 587 | } |
| 588 | |
| 589 | static struct notifier_block first_nb = { |
| 590 | .notifier_call = mce_first_notifier, |
| 591 | .priority = MCE_PRIO_FIRST, |
| 592 | }; |
| 593 | |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 594 | static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, |
| 595 | void *data) |
| 596 | { |
| 597 | struct mce *mce = (struct mce *)data; |
| 598 | unsigned long pfn; |
| 599 | |
| 600 | if (!mce) |
| 601 | return NOTIFY_DONE; |
| 602 | |
Borislav Petkov | c0ec382 | 2015-11-24 08:41:18 +0100 | [diff] [blame] | 603 | if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 604 | pfn = mce->addr >> PAGE_SHIFT; |
Tony Luck | fd0e786 | 2018-01-25 14:23:48 -0800 | [diff] [blame] | 605 | if (!memory_failure(pfn, 0)) |
Dan Williams | 284ce40 | 2018-07-13 21:50:32 -0700 | [diff] [blame] | 606 | set_mce_nospec(pfn); |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 607 | } |
| 608 | |
| 609 | return NOTIFY_OK; |
| 610 | } |
| 611 | static struct notifier_block mce_srao_nb = { |
| 612 | .notifier_call = srao_decode_notifier, |
Borislav Petkov | 9026cc8 | 2017-01-23 19:35:14 +0100 | [diff] [blame] | 613 | .priority = MCE_PRIO_SRAO, |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 614 | }; |
| 615 | |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 616 | static int mce_default_notifier(struct notifier_block *nb, unsigned long val, |
| 617 | void *data) |
| 618 | { |
| 619 | struct mce *m = (struct mce *)data; |
| 620 | |
| 621 | if (!m) |
| 622 | return NOTIFY_DONE; |
| 623 | |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 624 | if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS) |
Andi Kleen | cc66afe | 2017-03-27 11:32:59 +0200 | [diff] [blame] | 625 | return NOTIFY_DONE; |
| 626 | |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 627 | __print_mce(m); |
| 628 | |
| 629 | return NOTIFY_DONE; |
| 630 | } |
| 631 | |
| 632 | static struct notifier_block mce_default_nb = { |
| 633 | .notifier_call = mce_default_notifier, |
| 634 | /* lowest prio, we want it to run last. */ |
Borislav Petkov | 9026cc8 | 2017-01-23 19:35:14 +0100 | [diff] [blame] | 635 | .priority = MCE_PRIO_LOWEST, |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 636 | }; |
| 637 | |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 638 | /* |
| 639 | * Read ADDR and MISC registers. |
| 640 | */ |
| 641 | static void mce_read_aux(struct mce *m, int i) |
| 642 | { |
| 643 | if (m->status & MCI_STATUS_MISCV) |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 644 | m->misc = mce_rdmsrl(msr_ops.misc(i)); |
Yazen Ghannam | db819d6 | 2016-09-12 09:59:28 +0200 | [diff] [blame] | 645 | |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 646 | if (m->status & MCI_STATUS_ADDRV) { |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 647 | m->addr = mce_rdmsrl(msr_ops.addr(i)); |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 648 | |
| 649 | /* |
| 650 | * Mask the reported address by the reported granularity. |
| 651 | */ |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 652 | if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) { |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 653 | u8 shift = MCI_MISC_ADDR_LSB(m->misc); |
| 654 | m->addr >>= shift; |
| 655 | m->addr <<= shift; |
| 656 | } |
Yazen Ghannam | 4f29b73 | 2016-09-12 09:59:39 +0200 | [diff] [blame] | 657 | |
| 658 | /* |
| 659 | * Extract [55:<lsb>] where lsb is the least significant |
| 660 | * *valid* bit of the address bits. |
| 661 | */ |
| 662 | if (mce_flags.smca) { |
| 663 | u8 lsb = (m->addr >> 56) & 0x3f; |
| 664 | |
| 665 | m->addr &= GENMASK_ULL(55, lsb); |
| 666 | } |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 667 | } |
Yazen Ghannam | db819d6 | 2016-09-12 09:59:28 +0200 | [diff] [blame] | 668 | |
Yazen Ghannam | 5828c46 | 2016-09-12 09:59:37 +0200 | [diff] [blame] | 669 | if (mce_flags.smca) { |
| 670 | m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i)); |
| 671 | |
| 672 | if (m->status & MCI_STATUS_SYNDV) |
| 673 | m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i)); |
| 674 | } |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 675 | } |
| 676 | |
Andi Kleen | ca84f69 | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 677 | DEFINE_PER_CPU(unsigned, mce_poll_count); |
| 678 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 679 | /* |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 680 | * Poll for corrected events or events that happened before reset. |
| 681 | * Those are just logged through /dev/mcelog. |
| 682 | * |
| 683 | * This is executed in standard interrupt context. |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 684 | * |
| 685 | * Note: spec recommends to panic for fatal unsignalled |
| 686 | * errors here. However this would be quite problematic -- |
| 687 | * we would need to reimplement the Monarch handling and |
| 688 | * it would mess up the exclusion between exception handler |
| 689 | * and poll hander -- * so we skip this for now. |
| 690 | * These cases should not happen anyways, or only when the CPU |
| 691 | * is already totally * confused. In this case it's likely it will |
| 692 | * not fully execute the machine check handler either. |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 693 | */ |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 694 | bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 695 | { |
Tony Luck | 8b38937 | 2015-11-24 08:41:17 +0100 | [diff] [blame] | 696 | bool error_seen = false; |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 697 | struct mce m; |
| 698 | int i; |
| 699 | |
Alex Shi | c6ae41e | 2012-05-11 15:35:27 +0800 | [diff] [blame] | 700 | this_cpu_inc(mce_poll_count); |
Andi Kleen | ca84f69 | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 701 | |
Hidetoshi Seto | b8325c5 | 2011-06-08 10:57:46 +0900 | [diff] [blame] | 702 | mce_gather_info(&m, NULL); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 703 | |
Borislav Petkov | 669c00f | 2017-01-23 19:35:09 +0100 | [diff] [blame] | 704 | if (flags & MCP_TIMESTAMP) |
| 705 | m.tsc = rdtsc(); |
Borislav Petkov | 5446735 | 2016-11-10 14:10:53 +0100 | [diff] [blame] | 706 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 707 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 708 | if (!mce_banks[i].ctl || !test_bit(i, *b)) |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 709 | continue; |
| 710 | |
| 711 | m.misc = 0; |
| 712 | m.addr = 0; |
| 713 | m.bank = i; |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 714 | |
| 715 | barrier(); |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 716 | m.status = mce_rdmsrl(msr_ops.status(i)); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 717 | if (!(m.status & MCI_STATUS_VAL)) |
| 718 | continue; |
| 719 | |
| 720 | /* |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 721 | * Uncorrected or signalled events are handled by the exception |
| 722 | * handler when it is enabled, so don't process those here. |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 723 | * |
| 724 | * TBD do the same check for MCI_STATUS_EN here? |
| 725 | */ |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 726 | if (!(flags & MCP_UC) && |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 727 | (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC))) |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 728 | continue; |
| 729 | |
Tony Luck | 8b38937 | 2015-11-24 08:41:17 +0100 | [diff] [blame] | 730 | error_seen = true; |
| 731 | |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 732 | mce_read_aux(&m, i); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 733 | |
Yazen Ghannam | e2de64e | 2017-06-26 14:35:31 +0200 | [diff] [blame] | 734 | m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); |
Chen Yucong | fa92c58 | 2014-11-18 10:09:20 +0800 | [diff] [blame] | 735 | |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 736 | /* |
| 737 | * Don't get the IP here because it's unlikely to |
| 738 | * have anything to do with the actual error location. |
| 739 | */ |
Tony Luck | 8b38937 | 2015-11-24 08:41:17 +0100 | [diff] [blame] | 740 | if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) |
Andi Kleen | 5679af4 | 2009-04-07 17:06:55 +0200 | [diff] [blame] | 741 | mce_log(&m); |
Borislav Petkov | c0ec382 | 2015-11-24 08:41:18 +0100 | [diff] [blame] | 742 | else if (mce_usable_address(&m)) { |
Tony Luck | 8b38937 | 2015-11-24 08:41:17 +0100 | [diff] [blame] | 743 | /* |
| 744 | * Although we skipped logging this, we still want |
| 745 | * to take action. Add to the pool so the registered |
| 746 | * notifiers will see it. |
| 747 | */ |
| 748 | if (!mce_gen_pool_add(&m)) |
| 749 | mce_schedule_work(); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 750 | } |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 751 | |
| 752 | /* |
| 753 | * Clear state for this bank. |
| 754 | */ |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 755 | mce_wrmsrl(msr_ops.status(i), 0); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 756 | } |
| 757 | |
| 758 | /* |
| 759 | * Don't clear MCG_STATUS here because it's only defined for |
| 760 | * exceptions. |
| 761 | */ |
Andi Kleen | 88921be | 2009-05-27 21:56:51 +0200 | [diff] [blame] | 762 | |
| 763 | sync_core(); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 764 | |
Tony Luck | 8b38937 | 2015-11-24 08:41:17 +0100 | [diff] [blame] | 765 | return error_seen; |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 766 | } |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 767 | EXPORT_SYMBOL_GPL(machine_check_poll); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 768 | |
| 769 | /* |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 770 | * Do a quick check if any of the events requires a panic. |
| 771 | * This decides if we keep the events around or clear them. |
| 772 | */ |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 773 | static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, |
| 774 | struct pt_regs *regs) |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 775 | { |
Borislav Petkov | 17fea54 | 2015-05-18 10:07:17 +0200 | [diff] [blame] | 776 | char *tmp; |
Borislav Petkov | 1f74c8a | 2018-06-22 11:54:28 +0200 | [diff] [blame] | 777 | int i; |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 778 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 779 | for (i = 0; i < mca_cfg.banks; i++) { |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 780 | m->status = mce_rdmsrl(msr_ops.status(i)); |
Borislav Petkov | 1f74c8a | 2018-06-22 11:54:28 +0200 | [diff] [blame] | 781 | if (!(m->status & MCI_STATUS_VAL)) |
| 782 | continue; |
| 783 | |
| 784 | __set_bit(i, validp); |
| 785 | if (quirk_no_way_out) |
| 786 | quirk_no_way_out(i, m, regs); |
Borislav Petkov | 17fea54 | 2015-05-18 10:07:17 +0200 | [diff] [blame] | 787 | |
| 788 | if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { |
Borislav Petkov | 1f74c8a | 2018-06-22 11:54:28 +0200 | [diff] [blame] | 789 | mce_read_aux(m, i); |
Borislav Petkov | 17fea54 | 2015-05-18 10:07:17 +0200 | [diff] [blame] | 790 | *msg = tmp; |
Borislav Petkov | 1f74c8a | 2018-06-22 11:54:28 +0200 | [diff] [blame] | 791 | return 1; |
Borislav Petkov | 17fea54 | 2015-05-18 10:07:17 +0200 | [diff] [blame] | 792 | } |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 793 | } |
Borislav Petkov | 1f74c8a | 2018-06-22 11:54:28 +0200 | [diff] [blame] | 794 | return 0; |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 795 | } |
| 796 | |
| 797 | /* |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 798 | * Variable to establish order between CPUs while scanning. |
| 799 | * Each CPU spins initially until executing is equal its number. |
| 800 | */ |
| 801 | static atomic_t mce_executing; |
| 802 | |
| 803 | /* |
| 804 | * Defines order of CPUs on entry. First CPU becomes Monarch. |
| 805 | */ |
| 806 | static atomic_t mce_callin; |
| 807 | |
| 808 | /* |
| 809 | * Check if a timeout waiting for other CPUs happened. |
| 810 | */ |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 811 | static int mce_timed_out(u64 *t, const char *msg) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 812 | { |
| 813 | /* |
| 814 | * The others already did panic for some reason. |
| 815 | * Bail out like in a timeout. |
| 816 | * rmb() to tell the compiler that system_state |
| 817 | * might have been modified by someone else. |
| 818 | */ |
| 819 | rmb(); |
Borislav Petkov | c7c9b39 | 2014-12-03 22:36:45 +0100 | [diff] [blame] | 820 | if (atomic_read(&mce_panicked)) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 821 | wait_for_panic(); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 822 | if (!mca_cfg.monarch_timeout) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 823 | goto out; |
| 824 | if ((s64)*t < SPINUNIT) { |
Borislav Petkov | 716079f | 2014-05-23 11:06:35 +0200 | [diff] [blame] | 825 | if (mca_cfg.tolerant <= 1) |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 826 | mce_panic(msg, NULL, NULL); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 827 | cpu_missing = 1; |
| 828 | return 1; |
| 829 | } |
| 830 | *t -= SPINUNIT; |
| 831 | out: |
| 832 | touch_nmi_watchdog(); |
| 833 | return 0; |
| 834 | } |
| 835 | |
| 836 | /* |
| 837 | * The Monarch's reign. The Monarch is the CPU who entered |
| 838 | * the machine check handler first. It waits for the others to |
| 839 | * raise the exception too and then grades them. When any |
| 840 | * error is fatal panic. Only then let the others continue. |
| 841 | * |
| 842 | * The other CPUs entering the MCE handler will be controlled by the |
| 843 | * Monarch. They are called Subjects. |
| 844 | * |
| 845 | * This way we prevent any potential data corruption in a unrecoverable case |
| 846 | * and also makes sure always all CPU's errors are examined. |
| 847 | * |
Hidetoshi Seto | 680b6cf | 2009-08-26 16:20:36 +0900 | [diff] [blame] | 848 | * Also this detects the case of a machine check event coming from outer |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 849 | * space (not detected by any CPUs) In this case some external agent wants |
| 850 | * us to shut down, so panic too. |
| 851 | * |
| 852 | * The other CPUs might still decide to panic if the handler happens |
| 853 | * in a unrecoverable place, but in this case the system is in a semi-stable |
| 854 | * state and won't corrupt anything by itself. It's ok to let the others |
| 855 | * continue for a bit first. |
| 856 | * |
| 857 | * All the spin loops have timeouts; when a timeout happens a CPU |
| 858 | * typically elects itself to be Monarch. |
| 859 | */ |
| 860 | static void mce_reign(void) |
| 861 | { |
| 862 | int cpu; |
| 863 | struct mce *m = NULL; |
| 864 | int global_worst = 0; |
| 865 | char *msg = NULL; |
| 866 | char *nmsg = NULL; |
| 867 | |
| 868 | /* |
| 869 | * This CPU is the Monarch and the other CPUs have run |
| 870 | * through their handlers. |
| 871 | * Grade the severity of the errors of all the CPUs. |
| 872 | */ |
| 873 | for_each_possible_cpu(cpu) { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 874 | int severity = mce_severity(&per_cpu(mces_seen, cpu), |
| 875 | mca_cfg.tolerant, |
Chen Yucong | e348027 | 2014-11-18 10:09:19 +0800 | [diff] [blame] | 876 | &nmsg, true); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 877 | if (severity > global_worst) { |
| 878 | msg = nmsg; |
| 879 | global_worst = severity; |
| 880 | m = &per_cpu(mces_seen, cpu); |
| 881 | } |
| 882 | } |
| 883 | |
| 884 | /* |
| 885 | * Cannot recover? Panic here then. |
| 886 | * This dumps all the mces in the log buffer and stops the |
| 887 | * other CPUs. |
| 888 | */ |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 889 | if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) |
Derek Che | 8af7043 | 2015-02-02 10:30:21 -0800 | [diff] [blame] | 890 | mce_panic("Fatal machine check", m, msg); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 891 | |
| 892 | /* |
| 893 | * For UC somewhere we let the CPU who detects it handle it. |
| 894 | * Also must let continue the others, otherwise the handling |
| 895 | * CPU could deadlock on a lock. |
| 896 | */ |
| 897 | |
| 898 | /* |
| 899 | * No machine check event found. Must be some external |
| 900 | * source or one CPU is hung. Panic. |
| 901 | */ |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 902 | if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) |
Derek Che | 8af7043 | 2015-02-02 10:30:21 -0800 | [diff] [blame] | 903 | mce_panic("Fatal machine check from unknown source", NULL, NULL); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 904 | |
| 905 | /* |
| 906 | * Now clear all the mces_seen so that they don't reappear on |
| 907 | * the next mce. |
| 908 | */ |
| 909 | for_each_possible_cpu(cpu) |
| 910 | memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); |
| 911 | } |
| 912 | |
| 913 | static atomic_t global_nwo; |
| 914 | |
| 915 | /* |
| 916 | * Start of Monarch synchronization. This waits until all CPUs have |
| 917 | * entered the exception handler and then determines if any of them |
| 918 | * saw a fatal event that requires panic. Then it executes them |
| 919 | * in the entry order. |
| 920 | * TBD double check parallel CPU hotunplug |
| 921 | */ |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 922 | static int mce_start(int *no_way_out) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 923 | { |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 924 | int order; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 925 | int cpus = num_online_cpus(); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 926 | u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 927 | |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 928 | if (!timeout) |
| 929 | return -1; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 930 | |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 931 | atomic_add(*no_way_out, &global_nwo); |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 932 | /* |
Davidlohr Bueso | bf92b1f | 2016-04-06 10:05:15 +0200 | [diff] [blame] | 933 | * Rely on the implied barrier below, such that global_nwo |
| 934 | * is updated before mce_callin. |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 935 | */ |
Borislav Petkov | a95436e | 2009-06-20 23:28:22 -0700 | [diff] [blame] | 936 | order = atomic_inc_return(&mce_callin); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 937 | |
| 938 | /* |
| 939 | * Wait for everyone. |
| 940 | */ |
| 941 | while (atomic_read(&mce_callin) != cpus) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 942 | if (mce_timed_out(&timeout, |
| 943 | "Timeout: Not all CPUs entered broadcast exception handler")) { |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 944 | atomic_set(&global_nwo, 0); |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 945 | return -1; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 946 | } |
| 947 | ndelay(SPINUNIT); |
| 948 | } |
| 949 | |
| 950 | /* |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 951 | * mce_callin should be read before global_nwo |
| 952 | */ |
| 953 | smp_rmb(); |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 954 | |
| 955 | if (order == 1) { |
| 956 | /* |
| 957 | * Monarch: Starts executing now, the others wait. |
| 958 | */ |
| 959 | atomic_set(&mce_executing, 1); |
| 960 | } else { |
| 961 | /* |
| 962 | * Subject: Now start the scanning loop one by one in |
| 963 | * the original callin order. |
| 964 | * This way when there are any shared banks it will be |
| 965 | * only seen by one CPU before cleared, avoiding duplicates. |
| 966 | */ |
| 967 | while (atomic_read(&mce_executing) < order) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 968 | if (mce_timed_out(&timeout, |
| 969 | "Timeout: Subject CPUs unable to finish machine check processing")) { |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 970 | atomic_set(&global_nwo, 0); |
| 971 | return -1; |
| 972 | } |
| 973 | ndelay(SPINUNIT); |
| 974 | } |
| 975 | } |
| 976 | |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 977 | /* |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 978 | * Cache the global no_way_out state. |
| 979 | */ |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 980 | *no_way_out = atomic_read(&global_nwo); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 981 | |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 982 | return order; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 983 | } |
| 984 | |
| 985 | /* |
| 986 | * Synchronize between CPUs after main scanning loop. |
| 987 | * This invokes the bulk of the Monarch processing. |
| 988 | */ |
| 989 | static int mce_end(int order) |
| 990 | { |
| 991 | int ret = -1; |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 992 | u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 993 | |
| 994 | if (!timeout) |
| 995 | goto reset; |
| 996 | if (order < 0) |
| 997 | goto reset; |
| 998 | |
| 999 | /* |
| 1000 | * Allow others to run. |
| 1001 | */ |
| 1002 | atomic_inc(&mce_executing); |
| 1003 | |
| 1004 | if (order == 1) { |
| 1005 | /* CHECKME: Can this race with a parallel hotplug? */ |
| 1006 | int cpus = num_online_cpus(); |
| 1007 | |
| 1008 | /* |
| 1009 | * Monarch: Wait for everyone to go through their scanning |
| 1010 | * loops. |
| 1011 | */ |
| 1012 | while (atomic_read(&mce_executing) <= cpus) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 1013 | if (mce_timed_out(&timeout, |
| 1014 | "Timeout: Monarch CPU unable to finish machine check processing")) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1015 | goto reset; |
| 1016 | ndelay(SPINUNIT); |
| 1017 | } |
| 1018 | |
| 1019 | mce_reign(); |
| 1020 | barrier(); |
| 1021 | ret = 0; |
| 1022 | } else { |
| 1023 | /* |
| 1024 | * Subject: Wait for Monarch to finish. |
| 1025 | */ |
| 1026 | while (atomic_read(&mce_executing) != 0) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 1027 | if (mce_timed_out(&timeout, |
| 1028 | "Timeout: Monarch CPU did not finish machine check processing")) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1029 | goto reset; |
| 1030 | ndelay(SPINUNIT); |
| 1031 | } |
| 1032 | |
| 1033 | /* |
| 1034 | * Don't reset anything. That's done by the Monarch. |
| 1035 | */ |
| 1036 | return 0; |
| 1037 | } |
| 1038 | |
| 1039 | /* |
| 1040 | * Reset all global state. |
| 1041 | */ |
| 1042 | reset: |
| 1043 | atomic_set(&global_nwo, 0); |
| 1044 | atomic_set(&mce_callin, 0); |
| 1045 | barrier(); |
| 1046 | |
| 1047 | /* |
| 1048 | * Let others run again. |
| 1049 | */ |
| 1050 | atomic_set(&mce_executing, 0); |
| 1051 | return ret; |
| 1052 | } |
| 1053 | |
| 1054 | static void mce_clear_state(unsigned long *toclear) |
| 1055 | { |
| 1056 | int i; |
| 1057 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1058 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1059 | if (test_bit(i, toclear)) |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 1060 | mce_wrmsrl(msr_ops.status(i), 0); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1061 | } |
| 1062 | } |
| 1063 | |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1064 | static int do_memory_failure(struct mce *m) |
| 1065 | { |
| 1066 | int flags = MF_ACTION_REQUIRED; |
| 1067 | int ret; |
| 1068 | |
| 1069 | pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr); |
| 1070 | if (!(m->mcgstatus & MCG_STATUS_RIPV)) |
| 1071 | flags |= MF_MUST_KILL; |
Eric W. Biederman | 83b5753 | 2017-07-09 18:14:01 -0500 | [diff] [blame] | 1072 | ret = memory_failure(m->addr >> PAGE_SHIFT, flags); |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1073 | if (ret) |
| 1074 | pr_err("Memory error not recovered"); |
Tony Luck | fd0e786 | 2018-01-25 14:23:48 -0800 | [diff] [blame] | 1075 | else |
Dan Williams | 284ce40 | 2018-07-13 21:50:32 -0700 | [diff] [blame] | 1076 | set_mce_nospec(m->addr >> PAGE_SHIFT); |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1077 | return ret; |
| 1078 | } |
| 1079 | |
Borislav Petkov | d3d6923 | 2018-06-22 11:54:24 +0200 | [diff] [blame] | 1080 | |
| 1081 | /* |
| 1082 | * Cases where we avoid rendezvous handler timeout: |
| 1083 | * 1) If this CPU is offline. |
| 1084 | * |
| 1085 | * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to |
| 1086 | * skip those CPUs which remain looping in the 1st kernel - see |
| 1087 | * crash_nmi_callback(). |
| 1088 | * |
| 1089 | * Note: there still is a small window between kexec-ing and the new, |
| 1090 | * kdump kernel establishing a new #MC handler where a broadcasted MCE |
| 1091 | * might not get handled properly. |
| 1092 | */ |
| 1093 | static bool __mc_check_crashing_cpu(int cpu) |
| 1094 | { |
| 1095 | if (cpu_is_offline(cpu) || |
| 1096 | (crashing_cpu != -1 && crashing_cpu != cpu)) { |
| 1097 | u64 mcgstatus; |
| 1098 | |
| 1099 | mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); |
| 1100 | if (mcgstatus & MCG_STATUS_RIPV) { |
| 1101 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); |
| 1102 | return true; |
| 1103 | } |
| 1104 | } |
| 1105 | return false; |
| 1106 | } |
| 1107 | |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1108 | static void __mc_scan_banks(struct mce *m, struct mce *final, |
| 1109 | unsigned long *toclear, unsigned long *valid_banks, |
| 1110 | int no_way_out, int *worst) |
| 1111 | { |
| 1112 | struct mca_config *cfg = &mca_cfg; |
| 1113 | int severity, i; |
| 1114 | |
| 1115 | for (i = 0; i < cfg->banks; i++) { |
| 1116 | __clear_bit(i, toclear); |
| 1117 | if (!test_bit(i, valid_banks)) |
| 1118 | continue; |
Borislav Petkov | d5c84ef | 2018-06-22 11:54:27 +0200 | [diff] [blame] | 1119 | |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1120 | if (!mce_banks[i].ctl) |
| 1121 | continue; |
| 1122 | |
| 1123 | m->misc = 0; |
| 1124 | m->addr = 0; |
| 1125 | m->bank = i; |
| 1126 | |
| 1127 | m->status = mce_rdmsrl(msr_ops.status(i)); |
Borislav Petkov | d5c84ef | 2018-06-22 11:54:27 +0200 | [diff] [blame] | 1128 | if (!(m->status & MCI_STATUS_VAL)) |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1129 | continue; |
| 1130 | |
| 1131 | /* |
Borislav Petkov | d5c84ef | 2018-06-22 11:54:27 +0200 | [diff] [blame] | 1132 | * Corrected or non-signaled errors are handled by |
| 1133 | * machine_check_poll(). Leave them alone, unless this panics. |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1134 | */ |
| 1135 | if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && |
| 1136 | !no_way_out) |
| 1137 | continue; |
| 1138 | |
Borislav Petkov | d5c84ef | 2018-06-22 11:54:27 +0200 | [diff] [blame] | 1139 | /* Set taint even when machine check was not enabled. */ |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1140 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
| 1141 | |
| 1142 | severity = mce_severity(m, cfg->tolerant, NULL, true); |
| 1143 | |
| 1144 | /* |
| 1145 | * When machine check was for corrected/deferred handler don't |
Borislav Petkov | d5c84ef | 2018-06-22 11:54:27 +0200 | [diff] [blame] | 1146 | * touch, unless we're panicking. |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1147 | */ |
| 1148 | if ((severity == MCE_KEEP_SEVERITY || |
| 1149 | severity == MCE_UCNA_SEVERITY) && !no_way_out) |
| 1150 | continue; |
Borislav Petkov | d5c84ef | 2018-06-22 11:54:27 +0200 | [diff] [blame] | 1151 | |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1152 | __set_bit(i, toclear); |
Borislav Petkov | d5c84ef | 2018-06-22 11:54:27 +0200 | [diff] [blame] | 1153 | |
| 1154 | /* Machine check event was not enabled. Clear, but ignore. */ |
| 1155 | if (severity == MCE_NO_SEVERITY) |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1156 | continue; |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1157 | |
| 1158 | mce_read_aux(m, i); |
| 1159 | |
| 1160 | /* assuming valid severity level != 0 */ |
| 1161 | m->severity = severity; |
| 1162 | |
| 1163 | mce_log(m); |
| 1164 | |
| 1165 | if (severity > *worst) { |
| 1166 | *final = *m; |
| 1167 | *worst = severity; |
| 1168 | } |
| 1169 | } |
| 1170 | |
| 1171 | /* mce_clear_state will clear *final, save locally for use later */ |
| 1172 | *m = *final; |
| 1173 | } |
| 1174 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1175 | /* |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1176 | * The actual machine check handler. This only handles real |
| 1177 | * exceptions when something got corrupted coming in through int 18. |
| 1178 | * |
| 1179 | * This is executed in NMI context not subject to normal locking rules. This |
| 1180 | * implies that most kernel services cannot be safely used. Don't even |
| 1181 | * think about putting a printk in there! |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1182 | * |
| 1183 | * On Intel systems this is entered on all CPUs in parallel through |
| 1184 | * MCE broadcast. However some CPUs might be broken beyond repair, |
| 1185 | * so be always careful when synchronizing with others. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1186 | */ |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1187 | void do_machine_check(struct pt_regs *regs, long error_code) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | { |
Borislav Petkov | d3d6923 | 2018-06-22 11:54:24 +0200 | [diff] [blame] | 1189 | DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); |
| 1190 | DECLARE_BITMAP(toclear, MAX_NR_BANKS); |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1191 | struct mca_config *cfg = &mca_cfg; |
Borislav Petkov | d3d6923 | 2018-06-22 11:54:24 +0200 | [diff] [blame] | 1192 | int cpu = smp_processor_id(); |
| 1193 | char *msg = "Unknown"; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1194 | struct mce m, *final; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1195 | int worst = 0; |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1196 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1197 | /* |
| 1198 | * Establish sequential order between the CPUs entering the machine |
| 1199 | * check handler. |
| 1200 | */ |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1201 | int order = -1; |
Borislav Petkov | d3d6923 | 2018-06-22 11:54:24 +0200 | [diff] [blame] | 1202 | |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1203 | /* |
| 1204 | * If no_way_out gets set, there is no safe way to recover from this |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1205 | * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1206 | */ |
| 1207 | int no_way_out = 0; |
Borislav Petkov | d3d6923 | 2018-06-22 11:54:24 +0200 | [diff] [blame] | 1208 | |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1209 | /* |
| 1210 | * If kill_it gets set, there might be a way to recover from this |
| 1211 | * error. |
| 1212 | */ |
| 1213 | int kill_it = 0; |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1214 | |
| 1215 | /* |
| 1216 | * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES |
| 1217 | * on Intel. |
| 1218 | */ |
| 1219 | int lmce = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | |
Borislav Petkov | d3d6923 | 2018-06-22 11:54:24 +0200 | [diff] [blame] | 1221 | if (__mc_check_crashing_cpu(cpu)) |
| 1222 | return; |
Ashok Raj | d90167a | 2015-12-10 11:12:26 +0100 | [diff] [blame] | 1223 | |
Andy Lutomirski | 8c84014 | 2015-07-03 12:44:32 -0700 | [diff] [blame] | 1224 | ist_enter(regs); |
Andy Lutomirski | 9592747 | 2014-11-19 17:41:09 -0800 | [diff] [blame] | 1225 | |
Alex Shi | c6ae41e | 2012-05-11 15:35:27 +0800 | [diff] [blame] | 1226 | this_cpu_inc(mce_exception_count); |
Andi Kleen | 01ca79f | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 1227 | |
Hidetoshi Seto | b8325c5 | 2011-06-08 10:57:46 +0900 | [diff] [blame] | 1228 | mce_gather_info(&m, regs); |
Borislav Petkov | 669c00f | 2017-01-23 19:35:09 +0100 | [diff] [blame] | 1229 | m.tsc = rdtsc(); |
Andi Kleen | b5f2fa4 | 2009-02-12 13:43:22 +0100 | [diff] [blame] | 1230 | |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 1231 | final = this_cpu_ptr(&mces_seen); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1232 | *final = m; |
| 1233 | |
Tony Luck | 95022b8 | 2012-04-18 15:19:40 -0700 | [diff] [blame] | 1234 | memset(valid_banks, 0, sizeof(valid_banks)); |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 1235 | no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); |
Hidetoshi Seto | 680b6cf | 2009-08-26 16:20:36 +0900 | [diff] [blame] | 1236 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | barrier(); |
| 1238 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1239 | /* |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1240 | * When no restart IP might need to kill or panic. |
| 1241 | * Assume the worst for now, but if we find the |
| 1242 | * severity is MCE_AR_SEVERITY we have other options. |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1243 | */ |
| 1244 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) |
| 1245 | kill_it = 1; |
| 1246 | |
| 1247 | /* |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1248 | * Check if this MCE is signaled to only this logical processor, |
| 1249 | * on Intel only. |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1250 | */ |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1251 | if (m.cpuvendor == X86_VENDOR_INTEL) |
| 1252 | lmce = m.mcgstatus & MCG_STATUS_LMCES; |
| 1253 | |
| 1254 | /* |
Tony Luck | 40c36e2 | 2018-06-22 11:54:23 +0200 | [diff] [blame] | 1255 | * Local machine check may already know that we have to panic. |
| 1256 | * Broadcast machine check begins rendezvous in mce_start() |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1257 | * Go through all banks in exclusion of the other CPUs. This way we |
| 1258 | * don't report duplicated events on shared banks because the first one |
Tony Luck | 40c36e2 | 2018-06-22 11:54:23 +0200 | [diff] [blame] | 1259 | * to see it will clear it. |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1260 | */ |
Tony Luck | 40c36e2 | 2018-06-22 11:54:23 +0200 | [diff] [blame] | 1261 | if (lmce) { |
| 1262 | if (no_way_out) |
| 1263 | mce_panic("Fatal local machine check", &m, msg); |
| 1264 | } else { |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1265 | order = mce_start(&no_way_out); |
Tony Luck | 40c36e2 | 2018-06-22 11:54:23 +0200 | [diff] [blame] | 1266 | } |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1267 | |
Borislav Petkov | f35565e | 2018-06-22 11:54:26 +0200 | [diff] [blame] | 1268 | __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst); |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1269 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1270 | if (!no_way_out) |
| 1271 | mce_clear_state(toclear); |
| 1272 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1273 | /* |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1274 | * Do most of the synchronization with other CPUs. |
| 1275 | * When there's any problem use only local no_way_out state. |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1276 | */ |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1277 | if (!lmce) { |
| 1278 | if (mce_end(order) < 0) |
| 1279 | no_way_out = worst >= MCE_PANIC_SEVERITY; |
| 1280 | } else { |
| 1281 | /* |
Tony Luck | 40c36e2 | 2018-06-22 11:54:23 +0200 | [diff] [blame] | 1282 | * If there was a fatal machine check we should have |
| 1283 | * already called mce_panic earlier in this function. |
| 1284 | * Since we re-read the banks, we might have found |
| 1285 | * something new. Check again to see if we found a |
| 1286 | * fatal error. We call "mce_severity()" again to |
| 1287 | * make sure we have the right "msg". |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1288 | */ |
Tony Luck | 40c36e2 | 2018-06-22 11:54:23 +0200 | [diff] [blame] | 1289 | if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { |
| 1290 | mce_severity(&m, cfg->tolerant, &msg, true); |
| 1291 | mce_panic("Local fatal machine check!", &m, msg); |
| 1292 | } |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1293 | } |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1294 | |
| 1295 | /* |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1296 | * If tolerant is at an insane level we drop requests to kill |
| 1297 | * processes and continue even when there is no way out. |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1298 | */ |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1299 | if (cfg->tolerant == 3) |
| 1300 | kill_it = 0; |
| 1301 | else if (no_way_out) |
| 1302 | mce_panic("Fatal machine check on current CPU", &m, msg); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1303 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1304 | if (worst > 0) |
| 1305 | mce_report_event(regs); |
Andi Kleen | 5f8c1a5 | 2009-04-29 19:29:12 +0200 | [diff] [blame] | 1306 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); |
Borislav Petkov | 45deca7 | 2018-06-22 11:54:25 +0200 | [diff] [blame] | 1307 | |
Andi Kleen | 88921be | 2009-05-27 21:56:51 +0200 | [diff] [blame] | 1308 | sync_core(); |
Luck, Tony | d4812e1 | 2015-01-05 16:44:42 -0800 | [diff] [blame] | 1309 | |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1310 | if (worst != MCE_AR_SEVERITY && !kill_it) |
| 1311 | goto out_ist; |
Luck, Tony | d4812e1 | 2015-01-05 16:44:42 -0800 | [diff] [blame] | 1312 | |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1313 | /* Fault was in user mode and we need to take some action */ |
| 1314 | if ((m.cs & 3) == 3) { |
| 1315 | ist_begin_non_atomic(regs); |
| 1316 | local_irq_enable(); |
| 1317 | |
| 1318 | if (kill_it || do_memory_failure(&m)) |
| 1319 | force_sig(SIGBUS, current); |
| 1320 | local_irq_disable(); |
| 1321 | ist_end_non_atomic(); |
| 1322 | } else { |
Jann Horn | 81fd9c1 | 2018-08-28 22:14:19 +0200 | [diff] [blame] | 1323 | if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0)) |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1324 | mce_panic("Failed kernel mode recovery", &m, NULL); |
Luck, Tony | d4812e1 | 2015-01-05 16:44:42 -0800 | [diff] [blame] | 1325 | } |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1326 | |
| 1327 | out_ist: |
Andy Lutomirski | 8c84014 | 2015-07-03 12:44:32 -0700 | [diff] [blame] | 1328 | ist_exit(regs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | } |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 1330 | EXPORT_SYMBOL_GPL(do_machine_check); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | |
Tony Luck | cd42f4a | 2011-12-15 10:48:12 -0800 | [diff] [blame] | 1332 | #ifndef CONFIG_MEMORY_FAILURE |
Eric W. Biederman | 83b5753 | 2017-07-09 18:14:01 -0500 | [diff] [blame] | 1333 | int memory_failure(unsigned long pfn, int flags) |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1334 | { |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1335 | /* mce_severity() should not hand us an ACTION_REQUIRED error */ |
| 1336 | BUG_ON(flags & MF_ACTION_REQUIRED); |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1337 | pr_err("Uncorrected memory error in page 0x%lx ignored\n" |
| 1338 | "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", |
| 1339 | pfn); |
Tony Luck | cd42f4a | 2011-12-15 10:48:12 -0800 | [diff] [blame] | 1340 | |
| 1341 | return 0; |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1342 | } |
Tony Luck | cd42f4a | 2011-12-15 10:48:12 -0800 | [diff] [blame] | 1343 | #endif |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1344 | |
| 1345 | /* |
Tim Hockin | 8a336b0 | 2007-05-02 19:27:19 +0200 | [diff] [blame] | 1346 | * Periodic polling timer for "silent" machine check errors. If the |
| 1347 | * poller finds an MCE, poll 2x faster. When the poller finds no more |
| 1348 | * errors, poll 2x slower (up to check_interval seconds). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1349 | */ |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1350 | static unsigned long check_interval = INITIAL_CHECK_INTERVAL; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1351 | |
Thomas Gleixner | 82f7af0 | 2012-05-24 17:54:51 +0000 | [diff] [blame] | 1352 | static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1353 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1355 | static unsigned long mce_adjust_timer_default(unsigned long interval) |
| 1356 | { |
| 1357 | return interval; |
| 1358 | } |
| 1359 | |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1360 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1361 | |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 1362 | static void __start_timer(struct timer_list *t, unsigned long interval) |
Chen, Gong | 27f6c57 | 2014-03-27 21:24:36 -0400 | [diff] [blame] | 1363 | { |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1364 | unsigned long when = jiffies + interval; |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1365 | unsigned long flags; |
| 1366 | |
| 1367 | local_irq_save(flags); |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1368 | |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 1369 | if (!timer_pending(t) || time_before(when, t->expires)) |
| 1370 | mod_timer(t, round_jiffies(when)); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1371 | |
| 1372 | local_irq_restore(flags); |
| 1373 | } |
| 1374 | |
Kees Cook | 92bb6cb | 2017-10-04 17:54:25 -0700 | [diff] [blame] | 1375 | static void mce_timer_fn(struct timer_list *t) |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1376 | { |
Kees Cook | 92bb6cb | 2017-10-04 17:54:25 -0700 | [diff] [blame] | 1377 | struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1378 | unsigned long iv; |
| 1379 | |
Kees Cook | 92bb6cb | 2017-10-04 17:54:25 -0700 | [diff] [blame] | 1380 | WARN_ON(cpu_t != t); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1381 | |
| 1382 | iv = __this_cpu_read(mce_next_interval); |
| 1383 | |
| 1384 | if (mce_available(this_cpu_ptr(&cpu_info))) { |
Borislav Petkov | 5446735 | 2016-11-10 14:10:53 +0100 | [diff] [blame] | 1385 | machine_check_poll(0, this_cpu_ptr(&mce_poll_banks)); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1386 | |
| 1387 | if (mce_intel_cmci_poll()) { |
| 1388 | iv = mce_adjust_timer(iv); |
| 1389 | goto done; |
| 1390 | } |
| 1391 | } |
| 1392 | |
| 1393 | /* |
| 1394 | * Alert userspace if needed. If we logged an MCE, reduce the polling |
| 1395 | * interval, otherwise increase the polling interval. |
| 1396 | */ |
| 1397 | if (mce_notify_irq()) |
| 1398 | iv = max(iv / 2, (unsigned long) HZ/100); |
| 1399 | else |
| 1400 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); |
| 1401 | |
| 1402 | done: |
| 1403 | __this_cpu_write(mce_next_interval, iv); |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 1404 | __start_timer(t, iv); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1405 | } |
| 1406 | |
| 1407 | /* |
| 1408 | * Ensure that the timer is firing in @interval from now. |
| 1409 | */ |
| 1410 | void mce_timer_kick(unsigned long interval) |
| 1411 | { |
| 1412 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
| 1413 | unsigned long iv = __this_cpu_read(mce_next_interval); |
| 1414 | |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 1415 | __start_timer(t, interval); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1416 | |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1417 | if (interval < iv) |
| 1418 | __this_cpu_write(mce_next_interval, interval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1419 | } |
| 1420 | |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 1421 | /* Must not be called in IRQ context where del_timer_sync() can deadlock */ |
| 1422 | static void mce_timer_delete_all(void) |
| 1423 | { |
| 1424 | int cpu; |
| 1425 | |
| 1426 | for_each_online_cpu(cpu) |
| 1427 | del_timer_sync(&per_cpu(mce_timer, cpu)); |
| 1428 | } |
| 1429 | |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1430 | /* |
Andi Kleen | 9bd9840 | 2009-02-12 13:39:28 +0100 | [diff] [blame] | 1431 | * Notify the user(s) about new machine check events. |
| 1432 | * Can be called from interrupt context, but not from machine check/NMI |
| 1433 | * context. |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1434 | */ |
Andi Kleen | 9ff36ee | 2009-05-27 21:56:58 +0200 | [diff] [blame] | 1435 | int mce_notify_irq(void) |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1436 | { |
Andi Kleen | 8457c84 | 2009-02-12 13:49:33 +0100 | [diff] [blame] | 1437 | /* Not more than two messages every minute */ |
| 1438 | static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); |
| 1439 | |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 1440 | if (test_and_clear_bit(0, &mce_need_notify)) { |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 1441 | mce_work_trigger(); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1442 | |
Andi Kleen | 8457c84 | 2009-02-12 13:49:33 +0100 | [diff] [blame] | 1443 | if (__ratelimit(&ratelimit)) |
Huang Ying | a2d7b0d | 2010-06-08 14:35:39 +0800 | [diff] [blame] | 1444 | pr_info(HW_ERR "Machine check events logged\n"); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1445 | |
| 1446 | return 1; |
| 1447 | } |
| 1448 | return 0; |
| 1449 | } |
Andi Kleen | 9ff36ee | 2009-05-27 21:56:58 +0200 | [diff] [blame] | 1450 | EXPORT_SYMBOL_GPL(mce_notify_irq); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1451 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1452 | static int __mcheck_cpu_mce_banks_init(void) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1453 | { |
| 1454 | int i; |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1455 | u8 num_banks = mca_cfg.banks; |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1456 | |
Kees Cook | 6396bb2 | 2018-06-12 14:03:40 -0700 | [diff] [blame] | 1457 | mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL); |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1458 | if (!mce_banks) |
| 1459 | return -ENOMEM; |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1460 | |
| 1461 | for (i = 0; i < num_banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1462 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 1463 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1464 | b->ctl = -1ULL; |
| 1465 | b->init = 1; |
| 1466 | } |
| 1467 | return 0; |
| 1468 | } |
| 1469 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1470 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1471 | * Initialize Machine Checks for a CPU. |
| 1472 | */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1473 | static int __mcheck_cpu_cap_init(void) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1474 | { |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1475 | unsigned b; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1476 | u64 cap; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1477 | |
| 1478 | rdmsrl(MSR_IA32_MCG_CAP, cap); |
Thomas Gleixner | 01c6680 | 2009-04-08 12:31:24 +0200 | [diff] [blame] | 1479 | |
| 1480 | b = cap & MCG_BANKCNT_MASK; |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1481 | if (!mca_cfg.banks) |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1482 | pr_info("CPU supports %d MCE banks\n", b); |
Ingo Molnar | b659294 | 2009-04-08 12:31:27 +0200 | [diff] [blame] | 1483 | |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1484 | if (b > MAX_NR_BANKS) { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1485 | pr_warn("Using only %u machine check banks out of %u\n", |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1486 | MAX_NR_BANKS, b); |
| 1487 | b = MAX_NR_BANKS; |
| 1488 | } |
| 1489 | |
| 1490 | /* Don't support asymmetric configurations today */ |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1491 | WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); |
| 1492 | mca_cfg.banks = b; |
| 1493 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1494 | if (!mce_banks) { |
Hidetoshi Seto | cffd377 | 2009-11-12 15:52:40 +0900 | [diff] [blame] | 1495 | int err = __mcheck_cpu_mce_banks_init(); |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 1496 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1497 | if (err) |
| 1498 | return err; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1499 | } |
| 1500 | |
| 1501 | /* Use accurate RIP reporting if available. */ |
Thomas Gleixner | 01c6680 | 2009-04-08 12:31:24 +0200 | [diff] [blame] | 1502 | if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1503 | mca_cfg.rip_msr = MSR_IA32_MCG_EIP; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1504 | |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1505 | if (cap & MCG_SER_P) |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 1506 | mca_cfg.ser = 1; |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1507 | |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1508 | return 0; |
| 1509 | } |
| 1510 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1511 | static void __mcheck_cpu_init_generic(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | { |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1513 | enum mcp_flags m_fl = 0; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1514 | mce_banks_t all_banks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | u64 cap; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1516 | |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1517 | if (!mca_cfg.bootlog) |
| 1518 | m_fl = MCP_DONTLOG; |
| 1519 | |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1520 | /* |
| 1521 | * Log the machine checks left over from the previous reset. |
| 1522 | */ |
Andi Kleen | ee031c3 | 2009-02-12 13:49:34 +0100 | [diff] [blame] | 1523 | bitmap_fill(all_banks, MAX_NR_BANKS); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1524 | machine_check_poll(MCP_UC | m_fl, &all_banks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1525 | |
Andy Lutomirski | 375074c | 2014-10-24 15:58:07 -0700 | [diff] [blame] | 1526 | cr4_set_bits(X86_CR4_MCE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1528 | rdmsrl(MSR_IA32_MCG_CAP, cap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | if (cap & MCG_CTL_P) |
| 1530 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 1531 | } |
| 1532 | |
| 1533 | static void __mcheck_cpu_init_clear_banks(void) |
| 1534 | { |
| 1535 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1537 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1538 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 1539 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1540 | if (!b->init) |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 1541 | continue; |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 1542 | wrmsrl(msr_ops.ctl(i), b->ctl); |
| 1543 | wrmsrl(msr_ops.status(i), 0); |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1544 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | } |
| 1546 | |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 1547 | /* |
| 1548 | * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and |
| 1549 | * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM |
| 1550 | * Vol 3B Table 15-20). But this confuses both the code that determines |
| 1551 | * whether the machine check occurred in kernel or user mode, and also |
| 1552 | * the severity assessment code. Pretend that EIPV was set, and take the |
| 1553 | * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. |
| 1554 | */ |
| 1555 | static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) |
| 1556 | { |
| 1557 | if (bank != 0) |
| 1558 | return; |
| 1559 | if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) |
| 1560 | return; |
| 1561 | if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| |
| 1562 | MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| |
| 1563 | MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| |
| 1564 | MCACOD)) != |
| 1565 | (MCI_STATUS_UC|MCI_STATUS_EN| |
| 1566 | MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| |
| 1567 | MCI_STATUS_AR|MCACOD_INSTR)) |
| 1568 | return; |
| 1569 | |
| 1570 | m->mcgstatus |= MCG_STATUS_EIPV; |
| 1571 | m->ip = regs->ip; |
| 1572 | m->cs = regs->cs; |
| 1573 | } |
| 1574 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1575 | /* Add per CPU specific workarounds here */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1576 | static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1577 | { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1578 | struct mca_config *cfg = &mca_cfg; |
| 1579 | |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1580 | if (c->x86_vendor == X86_VENDOR_UNKNOWN) { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1581 | pr_info("unknown CPU type - not enabling MCE support\n"); |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1582 | return -EOPNOTSUPP; |
| 1583 | } |
| 1584 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1585 | /* This should be disabled by the BIOS, but isn't always */ |
Jan Beulich | 911f6a7 | 2008-04-22 16:22:21 +0100 | [diff] [blame] | 1586 | if (c->x86_vendor == X86_VENDOR_AMD) { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1587 | if (c->x86 == 15 && cfg->banks > 4) { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1588 | /* |
| 1589 | * disable GART TBL walk error reporting, which |
| 1590 | * trips off incorrectly with the IOMMU & 3ware |
| 1591 | * & Cerberus: |
| 1592 | */ |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1593 | clear_bit(10, (unsigned long *)&mce_banks[4].ctl); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1594 | } |
Yazen Ghannam | 6057077 | 2017-06-13 18:28:35 +0200 | [diff] [blame] | 1595 | if (c->x86 < 0x11 && cfg->bootlog < 0) { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1596 | /* |
| 1597 | * Lots of broken BIOS around that don't clear them |
| 1598 | * by default and leave crap in there. Don't log: |
| 1599 | */ |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1600 | cfg->bootlog = 0; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1601 | } |
Andi Kleen | 2e6f694 | 2009-04-27 18:42:48 +0200 | [diff] [blame] | 1602 | /* |
| 1603 | * Various K7s with broken bank 0 around. Always disable |
| 1604 | * by default. |
| 1605 | */ |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1606 | if (c->x86 == 6 && cfg->banks > 0) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1607 | mce_banks[0].ctl = 0; |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1608 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1609 | /* |
Aravind Gopalakrishnan | bf80bbd | 2015-03-23 10:42:52 -0500 | [diff] [blame] | 1610 | * overflow_recov is supported for F15h Models 00h-0fh |
| 1611 | * even though we don't have a CPUID bit for it. |
| 1612 | */ |
| 1613 | if (c->x86 == 0x15 && c->x86_model <= 0xf) |
| 1614 | mce_flags.overflow_recov = 1; |
| 1615 | |
| 1616 | /* |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1617 | * Turn off MC4_MISC thresholding banks on those models since |
| 1618 | * they're not supported there. |
| 1619 | */ |
| 1620 | if (c->x86 == 0x15 && |
| 1621 | (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { |
| 1622 | int i; |
| 1623 | u64 hwcr; |
| 1624 | bool need_toggle; |
| 1625 | u32 msrs[] = { |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1626 | 0x00000413, /* MC4_MISC0 */ |
| 1627 | 0xc0000408, /* MC4_MISC1 */ |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1628 | }; |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1629 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1630 | rdmsrl(MSR_K7_HWCR, hwcr); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1631 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1632 | /* McStatusWrEn has to be set */ |
| 1633 | need_toggle = !(hwcr & BIT(18)); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1634 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1635 | if (need_toggle) |
| 1636 | wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1637 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1638 | /* Clear CntP bit safely */ |
| 1639 | for (i = 0; i < ARRAY_SIZE(msrs); i++) |
| 1640 | msr_clear_bit(msrs[i], 62); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1641 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1642 | /* restore old settings */ |
| 1643 | if (need_toggle) |
| 1644 | wrmsrl(MSR_K7_HWCR, hwcr); |
| 1645 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 | } |
Andi Kleen | e583538 | 2005-11-05 17:25:54 +0100 | [diff] [blame] | 1647 | |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 1648 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
| 1649 | /* |
| 1650 | * SDM documents that on family 6 bank 0 should not be written |
| 1651 | * because it aliases to another special BIOS controlled |
| 1652 | * register. |
| 1653 | * But it's not aliased anymore on model 0x1a+ |
| 1654 | * Don't ignore bank 0 completely because there could be a |
| 1655 | * valid event later, merely don't write CTL0. |
| 1656 | */ |
| 1657 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1658 | if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1659 | mce_banks[0].init = 0; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1660 | |
| 1661 | /* |
| 1662 | * All newer Intel systems support MCE broadcasting. Enable |
| 1663 | * synchronization with a one second timeout. |
| 1664 | */ |
| 1665 | if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1666 | cfg->monarch_timeout < 0) |
| 1667 | cfg->monarch_timeout = USEC_PER_SEC; |
Bartlomiej Zolnierkiewicz | c7f6fa4 | 2009-07-28 23:52:54 +0200 | [diff] [blame] | 1668 | |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1669 | /* |
| 1670 | * There are also broken BIOSes on some Pentium M and |
| 1671 | * earlier systems: |
| 1672 | */ |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1673 | if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) |
| 1674 | cfg->bootlog = 0; |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 1675 | |
| 1676 | if (c->x86 == 6 && c->x86_model == 45) |
| 1677 | quirk_no_way_out = quirk_sandybridge_ifu; |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 1678 | } |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1679 | if (cfg->monarch_timeout < 0) |
| 1680 | cfg->monarch_timeout = 0; |
| 1681 | if (cfg->bootlog != 0) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1682 | cfg->panic_timeout = 30; |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1683 | |
| 1684 | return 0; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1685 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1687 | static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1688 | { |
| 1689 | if (c->x86 != 5) |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1690 | return 0; |
| 1691 | |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1692 | switch (c->x86_vendor) { |
| 1693 | case X86_VENDOR_INTEL: |
Hidetoshi Seto | c697836 | 2009-06-15 17:22:49 +0900 | [diff] [blame] | 1694 | intel_p5_mcheck_init(c); |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1695 | return 1; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1696 | break; |
| 1697 | case X86_VENDOR_CENTAUR: |
| 1698 | winchip_mcheck_init(c); |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1699 | return 1; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1700 | break; |
Borislav Petkov | dc34bdd | 2015-10-30 13:11:38 +0100 | [diff] [blame] | 1701 | default: |
| 1702 | return 0; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1703 | } |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1704 | |
| 1705 | return 0; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1706 | } |
| 1707 | |
Yazen Ghannam | 5204bf1 | 2017-03-15 12:30:55 -0500 | [diff] [blame] | 1708 | /* |
| 1709 | * Init basic CPU features needed for early decoding of MCEs. |
| 1710 | */ |
| 1711 | static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) |
| 1712 | { |
Pu Wen | ac78bd7 | 2018-09-23 17:36:04 +0800 | [diff] [blame] | 1713 | if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { |
Yazen Ghannam | 5204bf1 | 2017-03-15 12:30:55 -0500 | [diff] [blame] | 1714 | mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); |
| 1715 | mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); |
| 1716 | mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); |
| 1717 | |
| 1718 | if (mce_flags.smca) { |
| 1719 | msr_ops.ctl = smca_ctl_reg; |
| 1720 | msr_ops.status = smca_status_reg; |
| 1721 | msr_ops.addr = smca_addr_reg; |
| 1722 | msr_ops.misc = smca_misc_reg; |
| 1723 | } |
| 1724 | } |
| 1725 | } |
| 1726 | |
David Wang | 13e8582 | 2018-04-25 18:33:39 +0800 | [diff] [blame] | 1727 | static void mce_centaur_feature_init(struct cpuinfo_x86 *c) |
| 1728 | { |
| 1729 | struct mca_config *cfg = &mca_cfg; |
| 1730 | |
| 1731 | /* |
| 1732 | * All newer Centaur CPUs support MCE broadcasting. Enable |
| 1733 | * synchronization with a one second timeout. |
| 1734 | */ |
| 1735 | if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || |
| 1736 | c->x86 > 6) { |
| 1737 | if (cfg->monarch_timeout < 0) |
| 1738 | cfg->monarch_timeout = USEC_PER_SEC; |
| 1739 | } |
| 1740 | } |
| 1741 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1742 | static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1743 | { |
| 1744 | switch (c->x86_vendor) { |
| 1745 | case X86_VENDOR_INTEL: |
| 1746 | mce_intel_feature_init(c); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1747 | mce_adjust_timer = cmci_intel_adjust_timer; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1748 | break; |
Aravind Gopalakrishnan | 7559e13 | 2015-05-06 06:58:55 -0500 | [diff] [blame] | 1749 | |
| 1750 | case X86_VENDOR_AMD: { |
Aravind Gopalakrishnan | bfbe0ee | 2016-01-25 20:41:48 +0100 | [diff] [blame] | 1751 | mce_amd_feature_init(c); |
Jacob Shin | 89b831e | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 1752 | break; |
Aravind Gopalakrishnan | 7559e13 | 2015-05-06 06:58:55 -0500 | [diff] [blame] | 1753 | } |
Pu Wen | ac78bd7 | 2018-09-23 17:36:04 +0800 | [diff] [blame] | 1754 | |
| 1755 | case X86_VENDOR_HYGON: |
| 1756 | mce_hygon_feature_init(c); |
| 1757 | break; |
| 1758 | |
David Wang | 13e8582 | 2018-04-25 18:33:39 +0800 | [diff] [blame] | 1759 | case X86_VENDOR_CENTAUR: |
| 1760 | mce_centaur_feature_init(c); |
| 1761 | break; |
Aravind Gopalakrishnan | 7559e13 | 2015-05-06 06:58:55 -0500 | [diff] [blame] | 1762 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | default: |
| 1764 | break; |
| 1765 | } |
| 1766 | } |
| 1767 | |
Ashok Raj | 8838eb6 | 2015-08-12 18:29:40 +0200 | [diff] [blame] | 1768 | static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) |
| 1769 | { |
| 1770 | switch (c->x86_vendor) { |
| 1771 | case X86_VENDOR_INTEL: |
| 1772 | mce_intel_feature_clear(c); |
| 1773 | break; |
| 1774 | default: |
| 1775 | break; |
| 1776 | } |
| 1777 | } |
| 1778 | |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 1779 | static void mce_start_timer(struct timer_list *t) |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1780 | { |
Borislav Petkov | 4f75d84 | 2013-12-23 18:05:02 +0100 | [diff] [blame] | 1781 | unsigned long iv = check_interval * HZ; |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1782 | |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1783 | if (mca_cfg.ignore_ce || !iv) |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1784 | return; |
| 1785 | |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 1786 | this_cpu_write(mce_next_interval, iv); |
| 1787 | __start_timer(t, iv); |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1788 | } |
| 1789 | |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 1790 | static void __mcheck_cpu_setup_timer(void) |
| 1791 | { |
| 1792 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 1793 | |
Kees Cook | 92bb6cb | 2017-10-04 17:54:25 -0700 | [diff] [blame] | 1794 | timer_setup(t, mce_timer_fn, TIMER_PINNED); |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 1795 | } |
| 1796 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1797 | static void __mcheck_cpu_init_timer(void) |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1798 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 1799 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1800 | |
Kees Cook | 92bb6cb | 2017-10-04 17:54:25 -0700 | [diff] [blame] | 1801 | timer_setup(t, mce_timer_fn, TIMER_PINNED); |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 1802 | mce_start_timer(t); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1803 | } |
| 1804 | |
Andi Kleen | 9eda8cb | 2009-07-09 00:31:42 +0200 | [diff] [blame] | 1805 | /* Handle unconfigured int18 (should never happen) */ |
| 1806 | static void unexpected_machine_check(struct pt_regs *regs, long error_code) |
| 1807 | { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1808 | pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", |
Andi Kleen | 9eda8cb | 2009-07-09 00:31:42 +0200 | [diff] [blame] | 1809 | smp_processor_id()); |
| 1810 | } |
| 1811 | |
| 1812 | /* Call the installed machine check handler for this CPU setup. */ |
| 1813 | void (*machine_check_vector)(struct pt_regs *, long error_code) = |
| 1814 | unexpected_machine_check; |
| 1815 | |
Thomas Gleixner | 6f41c34 | 2018-01-18 16:28:26 +0100 | [diff] [blame] | 1816 | dotraplinkage void do_mce(struct pt_regs *regs, long error_code) |
| 1817 | { |
| 1818 | machine_check_vector(regs, error_code); |
| 1819 | } |
| 1820 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1821 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1822 | * Called for each booted CPU to set up machine checks. |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1823 | * Must be called with preempt off: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1824 | */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1825 | void mcheck_cpu_init(struct cpuinfo_x86 *c) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1826 | { |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1827 | if (mca_cfg.disabled) |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1828 | return; |
| 1829 | |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1830 | if (__mcheck_cpu_ancient_init(c)) |
| 1831 | return; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1832 | |
Andi Kleen | 5b4408f | 2009-02-12 13:39:30 +0100 | [diff] [blame] | 1833 | if (!mce_available(c)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | return; |
| 1835 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1836 | if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 1837 | mca_cfg.disabled = 1; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1838 | return; |
| 1839 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1840 | |
Chen, Gong | 648ed94 | 2015-08-12 18:29:34 +0200 | [diff] [blame] | 1841 | if (mce_gen_pool_init()) { |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 1842 | mca_cfg.disabled = 1; |
Chen, Gong | 648ed94 | 2015-08-12 18:29:34 +0200 | [diff] [blame] | 1843 | pr_emerg("Couldn't allocate MCE records pool!\n"); |
| 1844 | return; |
| 1845 | } |
| 1846 | |
Andi Kleen | 5d72792 | 2009-04-27 19:25:48 +0200 | [diff] [blame] | 1847 | machine_check_vector = do_machine_check; |
| 1848 | |
Yazen Ghannam | 5204bf1 | 2017-03-15 12:30:55 -0500 | [diff] [blame] | 1849 | __mcheck_cpu_init_early(c); |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1850 | __mcheck_cpu_init_generic(); |
| 1851 | __mcheck_cpu_init_vendor(c); |
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 1852 | __mcheck_cpu_init_clear_banks(); |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 1853 | __mcheck_cpu_setup_timer(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1854 | } |
| 1855 | |
| 1856 | /* |
Ashok Raj | 8838eb6 | 2015-08-12 18:29:40 +0200 | [diff] [blame] | 1857 | * Called for each booted CPU to clear some machine checks opt-ins |
| 1858 | */ |
| 1859 | void mcheck_cpu_clear(struct cpuinfo_x86 *c) |
| 1860 | { |
| 1861 | if (mca_cfg.disabled) |
| 1862 | return; |
| 1863 | |
| 1864 | if (!mce_available(c)) |
| 1865 | return; |
| 1866 | |
| 1867 | /* |
| 1868 | * Possibly to clear general settings generic to x86 |
| 1869 | * __mcheck_cpu_clear_generic(c); |
| 1870 | */ |
| 1871 | __mcheck_cpu_clear_vendor(c); |
| 1872 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1873 | } |
| 1874 | |
Naveen N. Rao | c3d1fb5 | 2013-07-01 21:08:47 +0530 | [diff] [blame] | 1875 | static void __mce_disable_bank(void *arg) |
| 1876 | { |
| 1877 | int bank = *((int *)arg); |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 1878 | __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); |
Naveen N. Rao | c3d1fb5 | 2013-07-01 21:08:47 +0530 | [diff] [blame] | 1879 | cmci_disable_bank(bank); |
| 1880 | } |
| 1881 | |
| 1882 | void mce_disable_bank(int bank) |
| 1883 | { |
| 1884 | if (bank >= mca_cfg.banks) { |
| 1885 | pr_warn(FW_BUG |
| 1886 | "Ignoring request to disable invalid MCA bank %d.\n", |
| 1887 | bank); |
| 1888 | return; |
| 1889 | } |
| 1890 | set_bit(bank, mce_banks_ce_disabled); |
| 1891 | on_each_cpu(__mce_disable_bank, &bank, 1); |
| 1892 | } |
| 1893 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1894 | /* |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1895 | * mce=off Disables machine check |
| 1896 | * mce=no_cmci Disables CMCI |
Ashok Raj | 88d5386 | 2015-06-04 18:55:23 +0200 | [diff] [blame] | 1897 | * mce=no_lmce Disables LMCE |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1898 | * mce=dont_log_ce Clears corrected events silently, no log created for CEs. |
| 1899 | * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1900 | * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) |
| 1901 | * monarchtimeout is how long to wait for other CPUs on machine |
| 1902 | * check, or 0 to not wait |
Yazen Ghannam | 6057077 | 2017-06-13 18:28:35 +0200 | [diff] [blame] | 1903 | * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h |
| 1904 | and older. |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1905 | * mce=nobootlog Don't log MCEs from before booting. |
Naveen N. Rao | 450cc20 | 2012-09-27 10:08:00 -0700 | [diff] [blame] | 1906 | * mce=bios_cmci_threshold Don't program the CMCI threshold |
Tony Luck | 3637efb | 2016-09-01 11:39:33 -0700 | [diff] [blame] | 1907 | * mce=recovery force enable memcpy_mcsafe() |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1908 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1909 | static int __init mcheck_enable(char *str) |
| 1910 | { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1911 | struct mca_config *cfg = &mca_cfg; |
| 1912 | |
Bartlomiej Zolnierkiewicz | e3346fc | 2009-07-28 23:55:09 +0200 | [diff] [blame] | 1913 | if (*str == 0) { |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1914 | enable_p5_mce(); |
Bartlomiej Zolnierkiewicz | e3346fc | 2009-07-28 23:55:09 +0200 | [diff] [blame] | 1915 | return 1; |
| 1916 | } |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1917 | if (*str == '=') |
| 1918 | str++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1919 | if (!strcmp(str, "off")) |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 1920 | cfg->disabled = 1; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1921 | else if (!strcmp(str, "no_cmci")) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1922 | cfg->cmci_disabled = true; |
Ashok Raj | 88d5386 | 2015-06-04 18:55:23 +0200 | [diff] [blame] | 1923 | else if (!strcmp(str, "no_lmce")) |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 1924 | cfg->lmce_disabled = 1; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1925 | else if (!strcmp(str, "dont_log_ce")) |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1926 | cfg->dont_log_ce = true; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 1927 | else if (!strcmp(str, "ignore_ce")) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1928 | cfg->ignore_ce = true; |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1929 | else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1930 | cfg->bootlog = (str[0] == 'b'); |
Naveen N. Rao | 450cc20 | 2012-09-27 10:08:00 -0700 | [diff] [blame] | 1931 | else if (!strcmp(str, "bios_cmci_threshold")) |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 1932 | cfg->bios_cmci_threshold = 1; |
Tony Luck | 0f68c08 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1933 | else if (!strcmp(str, "recovery")) |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 1934 | cfg->recovery = 1; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1935 | else if (isdigit(str[0])) { |
Xie XiuQi | 5c31b28 | 2015-05-26 10:28:21 +0200 | [diff] [blame] | 1936 | if (get_option(&str, &cfg->tolerant) == 2) |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1937 | get_option(&str, &(cfg->monarch_timeout)); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1938 | } else { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1939 | pr_info("mce argument %s ignored. Please use /sys\n", str); |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 1940 | return 0; |
| 1941 | } |
OGAWA Hirofumi | 9b41046 | 2006-03-31 02:30:33 -0800 | [diff] [blame] | 1942 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1943 | } |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1944 | __setup("mce", mcheck_enable); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1945 | |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 1946 | int __init mcheck_init(void) |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 1947 | { |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 1948 | mcheck_intel_therm_init(); |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 1949 | mce_register_decode_chain(&first_nb); |
Borislav Petkov | eef4dfa | 2015-08-12 18:29:38 +0200 | [diff] [blame] | 1950 | mce_register_decode_chain(&mce_srao_nb); |
Borislav Petkov | cd9c57c | 2016-11-01 12:52:27 +0100 | [diff] [blame] | 1951 | mce_register_decode_chain(&mce_default_nb); |
Aravind Gopalakrishnan | 43eaa2a | 2015-03-23 10:42:53 -0500 | [diff] [blame] | 1952 | mcheck_vendor_init_severity(); |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 1953 | |
Borislav Petkov | cff4c03 | 2017-01-23 19:35:13 +0100 | [diff] [blame] | 1954 | INIT_WORK(&mce_work, mce_gen_pool_process); |
Chen, Gong | 061120a | 2015-08-12 18:29:35 +0200 | [diff] [blame] | 1955 | init_irq_work(&mce_irq_work, mce_irq_work_cb); |
| 1956 | |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 1957 | return 0; |
| 1958 | } |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 1959 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1960 | /* |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 1961 | * mce_syscore: PM support |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1962 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1963 | |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 1964 | /* |
| 1965 | * Disable machine checks on suspend and shutdown. We can't really handle |
| 1966 | * them later. |
| 1967 | */ |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 1968 | static void mce_disable_error_reporting(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 1969 | { |
| 1970 | int i; |
| 1971 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1972 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1973 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 1974 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1975 | if (b->init) |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 1976 | wrmsrl(msr_ops.ctl(i), 0); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 1977 | } |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 1978 | return; |
| 1979 | } |
| 1980 | |
| 1981 | static void vendor_disable_error_reporting(void) |
| 1982 | { |
| 1983 | /* |
Pu Wen | ac78bd7 | 2018-09-23 17:36:04 +0800 | [diff] [blame] | 1984 | * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs |
| 1985 | * are socket-wide. |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 1986 | * Disabling them for just a single offlined CPU is bad, since it will |
| 1987 | * inhibit reporting for all shared resources on the socket like the |
| 1988 | * last level cache (LLC), the integrated memory controller (iMC), etc. |
| 1989 | */ |
Yazen Ghannam | ec33838 | 2017-06-13 18:28:34 +0200 | [diff] [blame] | 1990 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || |
Pu Wen | ac78bd7 | 2018-09-23 17:36:04 +0800 | [diff] [blame] | 1991 | boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || |
Yazen Ghannam | ec33838 | 2017-06-13 18:28:34 +0200 | [diff] [blame] | 1992 | boot_cpu_data.x86_vendor == X86_VENDOR_AMD) |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 1993 | return; |
| 1994 | |
| 1995 | mce_disable_error_reporting(); |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 1996 | } |
| 1997 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 1998 | static int mce_syscore_suspend(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 1999 | { |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 2000 | vendor_disable_error_reporting(); |
| 2001 | return 0; |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2002 | } |
| 2003 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2004 | static void mce_syscore_shutdown(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2005 | { |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 2006 | vendor_disable_error_reporting(); |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2007 | } |
| 2008 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2009 | /* |
| 2010 | * On resume clear all MCE state. Don't want to see leftovers from the BIOS. |
| 2011 | * Only one CPU is active at this time, the others get re-added later using |
| 2012 | * CPU hotplug: |
| 2013 | */ |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2014 | static void mce_syscore_resume(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2015 | { |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2016 | __mcheck_cpu_init_generic(); |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2017 | __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); |
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 2018 | __mcheck_cpu_init_clear_banks(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2019 | } |
| 2020 | |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2021 | static struct syscore_ops mce_syscore_ops = { |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2022 | .suspend = mce_syscore_suspend, |
| 2023 | .shutdown = mce_syscore_shutdown, |
| 2024 | .resume = mce_syscore_resume, |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2025 | }; |
| 2026 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2027 | /* |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2028 | * mce_device: Sysfs support |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2029 | */ |
| 2030 | |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2031 | static void mce_cpu_restart(void *data) |
| 2032 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2033 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 33edbf0 | 2009-06-15 17:18:45 +0900 | [diff] [blame] | 2034 | return; |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2035 | __mcheck_cpu_init_generic(); |
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 2036 | __mcheck_cpu_init_clear_banks(); |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2037 | __mcheck_cpu_init_timer(); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2038 | } |
| 2039 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2040 | /* Reinit MCEs after user configuration changes */ |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2041 | static void mce_restart(void) |
| 2042 | { |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2043 | mce_timer_delete_all(); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2044 | on_each_cpu(mce_cpu_restart, NULL, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2045 | } |
| 2046 | |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2047 | /* Toggle features for corrected errors */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2048 | static void mce_disable_cmci(void *data) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2049 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2050 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2051 | return; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2052 | cmci_clear(); |
| 2053 | } |
| 2054 | |
| 2055 | static void mce_enable_ce(void *all) |
| 2056 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2057 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2058 | return; |
| 2059 | cmci_reenable(); |
| 2060 | cmci_recheck(); |
| 2061 | if (all) |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2062 | __mcheck_cpu_init_timer(); |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2063 | } |
| 2064 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2065 | static struct bus_type mce_subsys = { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2066 | .name = "machinecheck", |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2067 | .dev_name = "machinecheck", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2068 | }; |
| 2069 | |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2070 | DEFINE_PER_CPU(struct device *, mce_device); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2071 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2072 | static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2073 | { |
| 2074 | return container_of(attr, struct mce_bank, attr); |
| 2075 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2076 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2077 | static ssize_t show_bank(struct device *s, struct device_attribute *attr, |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2078 | char *buf) |
| 2079 | { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2080 | return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2081 | } |
| 2082 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2083 | static ssize_t set_bank(struct device *s, struct device_attribute *attr, |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2084 | const char *buf, size_t size) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2085 | { |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2086 | u64 new; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2087 | |
Daniel Walter | 164109e | 2014-08-08 14:24:03 -0700 | [diff] [blame] | 2088 | if (kstrtou64(buf, 0, &new) < 0) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2089 | return -EINVAL; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2090 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2091 | attr_to_bank(attr)->ctl = new; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2092 | mce_restart(); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2093 | |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2094 | return size; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2095 | } |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2096 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2097 | static ssize_t set_ignore_ce(struct device *s, |
| 2098 | struct device_attribute *attr, |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2099 | const char *buf, size_t size) |
| 2100 | { |
| 2101 | u64 new; |
| 2102 | |
Daniel Walter | 164109e | 2014-08-08 14:24:03 -0700 | [diff] [blame] | 2103 | if (kstrtou64(buf, 0, &new) < 0) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2104 | return -EINVAL; |
| 2105 | |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 2106 | mutex_lock(&mce_sysfs_mutex); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2107 | if (mca_cfg.ignore_ce ^ !!new) { |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2108 | if (new) { |
| 2109 | /* disable ce features */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2110 | mce_timer_delete_all(); |
| 2111 | on_each_cpu(mce_disable_cmci, NULL, 1); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2112 | mca_cfg.ignore_ce = true; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2113 | } else { |
| 2114 | /* enable ce features */ |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2115 | mca_cfg.ignore_ce = false; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2116 | on_each_cpu(mce_enable_ce, (void *)1, 1); |
| 2117 | } |
| 2118 | } |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 2119 | mutex_unlock(&mce_sysfs_mutex); |
| 2120 | |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2121 | return size; |
| 2122 | } |
| 2123 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2124 | static ssize_t set_cmci_disabled(struct device *s, |
| 2125 | struct device_attribute *attr, |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2126 | const char *buf, size_t size) |
| 2127 | { |
| 2128 | u64 new; |
| 2129 | |
Daniel Walter | 164109e | 2014-08-08 14:24:03 -0700 | [diff] [blame] | 2130 | if (kstrtou64(buf, 0, &new) < 0) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2131 | return -EINVAL; |
| 2132 | |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 2133 | mutex_lock(&mce_sysfs_mutex); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2134 | if (mca_cfg.cmci_disabled ^ !!new) { |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2135 | if (new) { |
| 2136 | /* disable cmci */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2137 | on_each_cpu(mce_disable_cmci, NULL, 1); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2138 | mca_cfg.cmci_disabled = true; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2139 | } else { |
| 2140 | /* enable cmci */ |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2141 | mca_cfg.cmci_disabled = false; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2142 | on_each_cpu(mce_enable_ce, NULL, 1); |
| 2143 | } |
| 2144 | } |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 2145 | mutex_unlock(&mce_sysfs_mutex); |
| 2146 | |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2147 | return size; |
| 2148 | } |
| 2149 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2150 | static ssize_t store_int_with_restart(struct device *s, |
| 2151 | struct device_attribute *attr, |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2152 | const char *buf, size_t size) |
| 2153 | { |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 2154 | unsigned long old_check_interval = check_interval; |
| 2155 | ssize_t ret = device_store_ulong(s, attr, buf, size); |
| 2156 | |
| 2157 | if (check_interval == old_check_interval) |
| 2158 | return ret; |
| 2159 | |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 2160 | mutex_lock(&mce_sysfs_mutex); |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2161 | mce_restart(); |
Seunghun Han | b3b7c47 | 2018-03-06 15:21:43 +0100 | [diff] [blame] | 2162 | mutex_unlock(&mce_sysfs_mutex); |
| 2163 | |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2164 | return ret; |
| 2165 | } |
| 2166 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2167 | static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 2168 | static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2169 | static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2170 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2171 | static struct dev_ext_attribute dev_attr_check_interval = { |
| 2172 | __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2173 | &check_interval |
| 2174 | }; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2175 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2176 | static struct dev_ext_attribute dev_attr_ignore_ce = { |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2177 | __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), |
| 2178 | &mca_cfg.ignore_ce |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2179 | }; |
| 2180 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2181 | static struct dev_ext_attribute dev_attr_cmci_disabled = { |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2182 | __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), |
| 2183 | &mca_cfg.cmci_disabled |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2184 | }; |
| 2185 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2186 | static struct device_attribute *mce_device_attrs[] = { |
| 2187 | &dev_attr_tolerant.attr, |
| 2188 | &dev_attr_check_interval.attr, |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 2189 | #ifdef CONFIG_X86_MCELOG_LEGACY |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2190 | &dev_attr_trigger, |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 2191 | #endif |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2192 | &dev_attr_monarch_timeout.attr, |
| 2193 | &dev_attr_dont_log_ce.attr, |
| 2194 | &dev_attr_ignore_ce.attr, |
| 2195 | &dev_attr_cmci_disabled.attr, |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2196 | NULL |
| 2197 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2198 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2199 | static cpumask_var_t mce_device_initialized; |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2200 | |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2201 | static void mce_device_release(struct device *dev) |
| 2202 | { |
| 2203 | kfree(dev); |
| 2204 | } |
| 2205 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2206 | /* Per cpu device init. All of the cpus still share the same ctrl bank: */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2207 | static int mce_device_create(unsigned int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2208 | { |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2209 | struct device *dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2210 | int err; |
Hidetoshi Seto | b1f49f9 | 2009-06-18 14:53:24 +0900 | [diff] [blame] | 2211 | int i, j; |
Mike Travis | 92cb761 | 2007-10-19 20:35:04 +0200 | [diff] [blame] | 2212 | |
Andreas Herrmann | 9036755 | 2007-11-07 02:12:58 +0100 | [diff] [blame] | 2213 | if (!mce_available(&boot_cpu_data)) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2214 | return -EIO; |
| 2215 | |
Sebastian Andrzej Siewior | 7f34b93 | 2016-11-10 18:44:43 +0100 | [diff] [blame] | 2216 | dev = per_cpu(mce_device, cpu); |
| 2217 | if (dev) |
| 2218 | return 0; |
| 2219 | |
Jordan Borgner | 0e96f31 | 2018-10-28 12:58:28 +0000 | [diff] [blame] | 2220 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2221 | if (!dev) |
| 2222 | return -ENOMEM; |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2223 | dev->id = cpu; |
| 2224 | dev->bus = &mce_subsys; |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2225 | dev->release = &mce_device_release; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2226 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2227 | err = device_register(dev); |
Levente Kurusa | 853d9b1 | 2013-11-29 21:28:48 +0100 | [diff] [blame] | 2228 | if (err) { |
| 2229 | put_device(dev); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2230 | return err; |
Levente Kurusa | 853d9b1 | 2013-11-29 21:28:48 +0100 | [diff] [blame] | 2231 | } |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2232 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2233 | for (i = 0; mce_device_attrs[i]; i++) { |
| 2234 | err = device_create_file(dev, mce_device_attrs[i]); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2235 | if (err) |
| 2236 | goto error; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2237 | } |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2238 | for (j = 0; j < mca_cfg.banks; j++) { |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2239 | err = device_create_file(dev, &mce_banks[j].attr); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2240 | if (err) |
| 2241 | goto error2; |
| 2242 | } |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2243 | cpumask_set_cpu(cpu, mce_device_initialized); |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2244 | per_cpu(mce_device, cpu) = dev; |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2245 | |
| 2246 | return 0; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2247 | error2: |
Hidetoshi Seto | b1f49f9 | 2009-06-18 14:53:24 +0900 | [diff] [blame] | 2248 | while (--j >= 0) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2249 | device_remove_file(dev, &mce_banks[j].attr); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2250 | error: |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2251 | while (--i >= 0) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2252 | device_remove_file(dev, mce_device_attrs[i]); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2253 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2254 | device_unregister(dev); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2255 | |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2256 | return err; |
| 2257 | } |
| 2258 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2259 | static void mce_device_remove(unsigned int cpu) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2260 | { |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2261 | struct device *dev = per_cpu(mce_device, cpu); |
Shaohua Li | 73ca535 | 2006-01-11 22:43:06 +0100 | [diff] [blame] | 2262 | int i; |
| 2263 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2264 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2265 | return; |
| 2266 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2267 | for (i = 0; mce_device_attrs[i]; i++) |
| 2268 | device_remove_file(dev, mce_device_attrs[i]); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2269 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2270 | for (i = 0; i < mca_cfg.banks; i++) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2271 | device_remove_file(dev, &mce_banks[i].attr); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2272 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2273 | device_unregister(dev); |
| 2274 | cpumask_clear_cpu(cpu, mce_device_initialized); |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2275 | per_cpu(mce_device, cpu) = NULL; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2276 | } |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2277 | |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2278 | /* Make sure there are no machine checks on offlined CPUs. */ |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 2279 | static void mce_disable_cpu(void) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2280 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2281 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2282 | return; |
Hidetoshi Seto | 767df1b | 2009-11-26 17:29:02 +0900 | [diff] [blame] | 2283 | |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 2284 | if (!cpuhp_tasks_frozen) |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2285 | cmci_clear(); |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2286 | |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 2287 | vendor_disable_error_reporting(); |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2288 | } |
| 2289 | |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 2290 | static void mce_reenable_cpu(void) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2291 | { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2292 | int i; |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2293 | |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2294 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2295 | return; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2296 | |
Sebastian Andrzej Siewior | 39f152f | 2016-11-10 18:44:45 +0100 | [diff] [blame] | 2297 | if (!cpuhp_tasks_frozen) |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2298 | cmci_reenable(); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2299 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2300 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2301 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2302 | if (b->init) |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 2303 | wrmsrl(msr_ops.ctl(i), b->ctl); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 2304 | } |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2305 | } |
| 2306 | |
Sebastian Andrzej Siewior | 0e285d3 | 2016-11-10 18:44:47 +0100 | [diff] [blame] | 2307 | static int mce_cpu_dead(unsigned int cpu) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2308 | { |
Sebastian Andrzej Siewior | 0e285d3 | 2016-11-10 18:44:47 +0100 | [diff] [blame] | 2309 | mce_intel_hcpu_update(cpu); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2310 | |
Sebastian Andrzej Siewior | 0e285d3 | 2016-11-10 18:44:47 +0100 | [diff] [blame] | 2311 | /* intentionally ignoring frozen here */ |
| 2312 | if (!cpuhp_tasks_frozen) |
| 2313 | cmci_rediscover(); |
| 2314 | return 0; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2315 | } |
| 2316 | |
Sebastian Andrzej Siewior | 8c0eeac | 2016-11-10 18:44:46 +0100 | [diff] [blame] | 2317 | static int mce_cpu_online(unsigned int cpu) |
| 2318 | { |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 2319 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
Sebastian Andrzej Siewior | 8c0eeac | 2016-11-10 18:44:46 +0100 | [diff] [blame] | 2320 | int ret; |
| 2321 | |
| 2322 | mce_device_create(cpu); |
| 2323 | |
| 2324 | ret = mce_threshold_create_device(cpu); |
| 2325 | if (ret) { |
| 2326 | mce_device_remove(cpu); |
| 2327 | return ret; |
| 2328 | } |
| 2329 | mce_reenable_cpu(); |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 2330 | mce_start_timer(t); |
Sebastian Andrzej Siewior | 8c0eeac | 2016-11-10 18:44:46 +0100 | [diff] [blame] | 2331 | return 0; |
| 2332 | } |
| 2333 | |
| 2334 | static int mce_cpu_pre_down(unsigned int cpu) |
| 2335 | { |
Thomas Gleixner | 0becc0a | 2017-01-31 09:37:34 +0100 | [diff] [blame] | 2336 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
Sebastian Andrzej Siewior | 8c0eeac | 2016-11-10 18:44:46 +0100 | [diff] [blame] | 2337 | |
| 2338 | mce_disable_cpu(); |
| 2339 | del_timer_sync(t); |
| 2340 | mce_threshold_remove_device(cpu); |
| 2341 | mce_device_remove(cpu); |
| 2342 | return 0; |
| 2343 | } |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2344 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2345 | static __init void mce_init_banks(void) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2346 | { |
| 2347 | int i; |
| 2348 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2349 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2350 | struct mce_bank *b = &mce_banks[i]; |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2351 | struct device_attribute *a = &b->attr; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2352 | |
Eric W. Biederman | a07e415 | 2010-02-11 15:23:05 -0800 | [diff] [blame] | 2353 | sysfs_attr_init(&a->attr); |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2354 | a->attr.name = b->attrname; |
| 2355 | snprintf(b->attrname, ATTR_LEN, "bank%d", i); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2356 | |
| 2357 | a->attr.mode = 0644; |
| 2358 | a->show = show_bank; |
| 2359 | a->store = set_bank; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2360 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2361 | } |
| 2362 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2363 | static __init int mcheck_init_device(void) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2364 | { |
| 2365 | int err; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2366 | |
Kirill A. Shutemov | c65e774 | 2018-02-14 14:16:53 +0300 | [diff] [blame] | 2367 | /* |
| 2368 | * Check if we have a spare virtual bit. This will only become |
| 2369 | * a problem if/when we move beyond 5-level page tables. |
| 2370 | */ |
| 2371 | MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63); |
| 2372 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2373 | if (!mce_available(&boot_cpu_data)) { |
| 2374 | err = -EIO; |
| 2375 | goto err_out; |
| 2376 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2377 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2378 | if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { |
| 2379 | err = -ENOMEM; |
| 2380 | goto err_out; |
| 2381 | } |
Rusty Russell | 996867d | 2009-03-13 14:49:51 +1030 | [diff] [blame] | 2382 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2383 | mce_init_banks(); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2384 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2385 | err = subsys_system_register(&mce_subsys, NULL); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2386 | if (err) |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2387 | goto err_out_mem; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2388 | |
Sebastian Andrzej Siewior | 0e285d3 | 2016-11-10 18:44:47 +0100 | [diff] [blame] | 2389 | err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL, |
| 2390 | mce_cpu_dead); |
| 2391 | if (err) |
| 2392 | goto err_out_mem; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2393 | |
Sebastian Andrzej Siewior | 8c0eeac | 2016-11-10 18:44:46 +0100 | [diff] [blame] | 2394 | err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online", |
| 2395 | mce_cpu_online, mce_cpu_pre_down); |
| 2396 | if (err < 0) |
Sebastian Andrzej Siewior | 0e285d3 | 2016-11-10 18:44:47 +0100 | [diff] [blame] | 2397 | goto err_out_online; |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 2398 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2399 | register_syscore_ops(&mce_syscore_ops); |
| 2400 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2401 | return 0; |
| 2402 | |
Sebastian Andrzej Siewior | 0e285d3 | 2016-11-10 18:44:47 +0100 | [diff] [blame] | 2403 | err_out_online: |
| 2404 | cpuhp_remove_state(CPUHP_X86_MCE_DEAD); |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2405 | |
| 2406 | err_out_mem: |
| 2407 | free_cpumask_var(mce_device_initialized); |
| 2408 | |
| 2409 | err_out: |
Tony Luck | 5de97c9 | 2017-03-27 11:33:03 +0200 | [diff] [blame] | 2410 | pr_err("Unable to init MCE device (rc: %d)\n", err); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2411 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2412 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2413 | } |
Liu, Jinsong | cef12ee | 2012-06-07 19:56:51 +0800 | [diff] [blame] | 2414 | device_initcall_sync(mcheck_init_device); |
Ingo Molnar | a988d33 | 2009-04-08 12:31:25 +0200 | [diff] [blame] | 2415 | |
Andi Kleen | d7c3c9a | 2009-04-28 23:07:25 +0200 | [diff] [blame] | 2416 | /* |
| 2417 | * Old style boot options parsing. Only for compatibility. |
| 2418 | */ |
| 2419 | static int __init mcheck_disable(char *str) |
| 2420 | { |
Borislav Petkov | 0993394 | 2018-02-21 11:18:54 +0100 | [diff] [blame] | 2421 | mca_cfg.disabled = 1; |
Andi Kleen | d7c3c9a | 2009-04-28 23:07:25 +0200 | [diff] [blame] | 2422 | return 1; |
| 2423 | } |
| 2424 | __setup("nomce", mcheck_disable); |
Huang Ying | 5be9ed2 | 2009-07-31 09:41:42 +0800 | [diff] [blame] | 2425 | |
| 2426 | #ifdef CONFIG_DEBUG_FS |
| 2427 | struct dentry *mce_get_debugfs_dir(void) |
| 2428 | { |
| 2429 | static struct dentry *dmce; |
| 2430 | |
| 2431 | if (!dmce) |
| 2432 | dmce = debugfs_create_dir("mce", NULL); |
| 2433 | |
| 2434 | return dmce; |
| 2435 | } |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2436 | |
| 2437 | static void mce_reset(void) |
| 2438 | { |
| 2439 | cpu_missing = 0; |
Borislav Petkov | c7c9b39 | 2014-12-03 22:36:45 +0100 | [diff] [blame] | 2440 | atomic_set(&mce_fake_panicked, 0); |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2441 | atomic_set(&mce_executing, 0); |
| 2442 | atomic_set(&mce_callin, 0); |
| 2443 | atomic_set(&global_nwo, 0); |
| 2444 | } |
| 2445 | |
| 2446 | static int fake_panic_get(void *data, u64 *val) |
| 2447 | { |
| 2448 | *val = fake_panic; |
| 2449 | return 0; |
| 2450 | } |
| 2451 | |
| 2452 | static int fake_panic_set(void *data, u64 val) |
| 2453 | { |
| 2454 | mce_reset(); |
| 2455 | fake_panic = val; |
| 2456 | return 0; |
| 2457 | } |
| 2458 | |
| 2459 | DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, |
| 2460 | fake_panic_set, "%llu\n"); |
| 2461 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2462 | static int __init mcheck_debugfs_init(void) |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2463 | { |
| 2464 | struct dentry *dmce, *ffake_panic; |
| 2465 | |
| 2466 | dmce = mce_get_debugfs_dir(); |
| 2467 | if (!dmce) |
| 2468 | return -ENOMEM; |
| 2469 | ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL, |
| 2470 | &fake_panic_fops); |
| 2471 | if (!ffake_panic) |
| 2472 | return -ENOMEM; |
| 2473 | |
| 2474 | return 0; |
| 2475 | } |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 2476 | #else |
| 2477 | static int __init mcheck_debugfs_init(void) { return -EINVAL; } |
Huang Ying | 5be9ed2 | 2009-07-31 09:41:42 +0800 | [diff] [blame] | 2478 | #endif |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 2479 | |
Tony Luck | 3637efb | 2016-09-01 11:39:33 -0700 | [diff] [blame] | 2480 | DEFINE_STATIC_KEY_FALSE(mcsafe_key); |
| 2481 | EXPORT_SYMBOL_GPL(mcsafe_key); |
| 2482 | |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 2483 | static int __init mcheck_late_init(void) |
| 2484 | { |
Tony Luck | 3637efb | 2016-09-01 11:39:33 -0700 | [diff] [blame] | 2485 | if (mca_cfg.recovery) |
| 2486 | static_branch_inc(&mcsafe_key); |
| 2487 | |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 2488 | mcheck_debugfs_init(); |
Borislav Petkov | 011d826 | 2017-03-27 11:33:02 +0200 | [diff] [blame] | 2489 | cec_init(); |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 2490 | |
| 2491 | /* |
| 2492 | * Flush out everything that has been logged during early boot, now that |
| 2493 | * everything has been initialized (workqueues, decoders, ...). |
| 2494 | */ |
| 2495 | mce_schedule_work(); |
| 2496 | |
| 2497 | return 0; |
| 2498 | } |
| 2499 | late_initcall(mcheck_late_init); |